def solution(n: int = 100) -> int:
    """Project Euler problem 6: return the difference between the square of the
    sum and the sum of the squares of the first ``n`` natural numbers."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
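
# Illustrative sanity check (not part of the original solution): compare the
# closed-form answer against a brute-force sum for a small n.
def _brute_force_solution(n: int = 100) -> int:
    return sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))


assert solution(10) == _brute_force_solution(10) == 2640
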
if __name__ == "__main__":
    print(f"{solution() = }")
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: print a topological ordering of a DAG given as an
    adjacency list, or report that the graph contains a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
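
# A minimal variant of the same algorithm (illustrative, not in the original
# file): collections.deque makes each dequeue O(1) instead of the O(n)
# list.pop(0), and the ordering is returned rather than printed.
from collections import deque


def topological_sort_deque(graph: dict) -> list:
    indegree = {u: 0 for u in graph}
    for values in graph.values():
        for v in values:
            indegree[v] += 1
    queue = deque(u for u, d in indegree.items() if d == 0)
    topo = []
    while queue:
        u = queue.popleft()
        topo.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    if len(topo) != len(graph):
        raise ValueError("Cycle exists")
    return topo


assert topological_sort_deque(graph) == [0, 1, 2, 3, 4, 5]
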
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder="vae",
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()

        return model

    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
            [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
            [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
            [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
    @parameterized.expand(
        [
            # fmt: off
            [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
            [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
            # fmt: on
        ]
    )
    @require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
    @parameterized.expand([(13,), (16,), (27,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)

    @parameterized.expand([(13,), (16,), (37,)])
    @require_torch_gpu
    @unittest.skipIf(not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.")
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
    @parameterized.expand(
        [
            # fmt: off
            [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
            [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
            # fmt: on
        ]
    )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
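

# Illustrative encode/decode round-trip outside the test harness (a minimal
# sketch; it reuses the dummy checkpoint "fusing/autoencoder-kl-dummy"
# exercised in the tests above):
#
#     vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
#     x = torch.randn(1, 3, 32, 32)
#     with torch.no_grad():
#         latents = vae.encode(x).latent_dist.sample()
#         recon = vae.decode(latents).sample
#     assert recon.shape == x.shape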
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        # compare the logits slice named in the comment above (the source compared the mean here, which
        # does not match a 30-element slice)
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
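

# Illustrative sketch (not part of the test file): the RoPE-scaling behavior
# exercised in test_model_rope_scaling above can be enabled on a real checkpoint
# through the config; the model id here is just an example.
#
#     config = LlamaConfig.from_pretrained("meta-llama/Llama-2-7b-hf")
#     config.rope_scaling = {"type": "linear", "factor": 2.0}
#     model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", config=config)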
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets) == set(tokens):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
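

# Minimal usage sketch (illustrative, outside the test harness; it reuses the
# tiny checkpoint exercised in the small-model tests above):
#
#     unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
#     print(unmasker("My name is <mask>"))
#     # -> a list of {"sequence", "score", "token", "token_str"} dicts, highest score first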
import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTex.\n    references: list of reference for each prediction. Each\n        reference is a string that contains natural language\n        and LaTex.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)

        return {
            "accuracy": accuracy,
        }
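

# Illustrative usage (assumes the optional `math_equivalence` dependency noted
# in the import above is installed; mirrors the docstring example):
#
#     metric = datasets.load_metric("competition_math")
#     results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#     assert results == {"accuracy": 1.0}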
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_encoder_decoder''': ['''EncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''EncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''TFEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''FlaxEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
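
# Illustrative usage sketch (not part of this __init__ module): composing an
# encoder-decoder model from two pretrained checkpoints; "bert-base-uncased"
# here is just an example model id.
#
#     from transformers import EncoderDecoderModel
#
#     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "bert-base-uncased", "bert-base-uncased"
#     )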
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RetriBERT model."""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
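

# Illustrative sketch: the config is instantiated like any other
# PretrainedConfig subclass; the override values below are arbitrary examples.
#
#     config = RetriBertConfig(projection_dim=256, share_encoders=False)
#     assert config.model_type == "retribert"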
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # align the backend pre-tokenizer's add_prefix_space with the argument
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
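

# Illustrative usage sketch (downloads the GPT-NeoX-20B tokenizer files on
# first call):
#
#     tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     ids = tokenizer("Hello world")["input_ids"]
#     assert tokenizer.decode(ids) == "Hello world"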
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class BigBirdTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    def __init__( self , vocab_file , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , mask_token=mask_token , cls_token=cls_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token: str ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode( self , token_ids , skip_special_tokens=False , clean_up_tokenization_spaces=None , spaces_between_special_tokens=True , **kwargs , ):
        '''simple docstring'''
        use_source_tokenizer = kwargs.pop("use_source_tokenizer" , False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids , skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPE
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])" , r"\1" , " ".join(sub_texts ) )
        else:
            text = "".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
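# Standalone sketch of the special-token layout the methods above produce for a
# pair of sequences (token ids are made up for illustration):
#   [CLS] A [SEP] B [SEP]  with type ids 0 over the first segment, 1 over the second.
_cls, _sep = [101], [102]
_a_ids, _b_ids = [10, 11], [20, 21, 22]
_pair = _cls + _a_ids + _sep + _b_ids + _sep
_type_ids = len(_cls + _a_ids + _sep ) * [0] + len(_b_ids + _sep ) * [1]
assert len(_pair ) == len(_type_ids ) == 8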
| 665 | 0 |
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness( check_program , timeout , task_id , completion_id ):
    '''simple docstring'''
    manager = multiprocessing.Manager()
    result = manager.list()
    p = multiprocessing.Process(target=unsafe_execute , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    if p.is_alive():
        p.kill()
    if not result:
        result.append('timed out' )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }

def unsafe_execute( check_program , result , timeout ):
    '''simple docstring'''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout ):
                    exec(check_program , exec_globals )
            result.append('passed' )
        except TimeoutException:
            result.append('timed out' )
        except BaseException as e:
            result.append(f'failed: {e}' )
        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit( seconds ):
    '''simple docstring'''
    def signal_handler( signum , frame ):
        raise TimeoutException('Timed out!' )

    signal.setitimer(signal.ITIMER_REAL , seconds )
    signal.signal(signal.SIGALRM , signal_handler )
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL , 0 )

@contextlib.contextmanager
def swallow_io():
    '''simple docstring'''
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream ):
        with contextlib.redirect_stderr(stream ):
            with redirect_stdin(stream ):
                yield

@contextlib.contextmanager
def create_tempdir():
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname ):
            yield dirname

class TimeoutException( Exception ):
    pass

class WriteOnlyStringIO( io.StringIO ):
    def read( self , *args , **kwargs ):
        """simple docstring"""
        raise OSError

    def readline( self , *args , **kwargs ):
        """simple docstring"""
        raise OSError

    def readlines( self , *args , **kwargs ):
        """simple docstring"""
        raise OSError

    def readable( self , *args , **kwargs ):
        """simple docstring"""
        return False

class redirect_stdin( contextlib._RedirectStream ):  # type: ignore
    _stream = 'stdin'

@contextlib.contextmanager
def chdir( root ):
    '''simple docstring'''
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root )
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd )
def reliability_guard( maximum_memory_bytes = None ):
    '''simple docstring'''
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = '1'
    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
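# Hedged usage sketch of the sandboxed checker above. check_correctness runs the
# program in a subprocess with I/O swallowed and destructive syscalls disabled;
# keep it under a __main__ guard (multiprocessing requires it on some platforms):
if __name__ == "__main__":
    demo = check_correctness("assert 1 + 1 == 2" , timeout=3.0 , task_id="demo/0" , completion_id=0 )
    print(demo )  # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}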
| 85 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline as Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        '''simple docstring'''
        generator = Text2TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test( self , generator , _ ):
        '''simple docstring'''
        outputs = generator("Something there" )
        self.assertEqual(outputs , [{"generated_text": ANY(str )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
        outputs = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
            ] , )
        outputs = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=True )
        self.assertEqual(
            outputs , [
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
                [{"generated_text": ANY(str )}, {"generated_text": ANY(str )}],
            ] , )
        with self.assertRaises(ValueError ):
            generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
        generator = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there" , do_sample=False )
        self.assertEqual(outputs , [{"generated_text": ""}] )
        num_return_sequences = 3
        outputs = generator(
            "Something there" , num_return_sequences=num_return_sequences , num_beams=num_return_sequences , )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs , target_outputs )
        outputs = generator("This is a test" , do_sample=True , num_return_sequences=2 , return_tensors=True )
        self.assertEqual(
            outputs , [
                {"generated_token_ids": ANY(torch.Tensor )},
                {"generated_token_ids": ANY(torch.Tensor )},
            ] , )
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
        self.assertEqual(
            outputs , [
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
            ] , )
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
        generator = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there" , do_sample=False )
        self.assertEqual(outputs , [{"generated_text": ""}] )
| 665 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
    factors = prime_factors(__UpperCamelCase )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
return 0
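# Self-contained reference implementation for quick cross-checking (avoids the
# repo-local `maths` imports; values follow the standard Möbius definition):
def _mobius_ref(n: int) -> int:
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor -> mu(n) = 0
                return 0
            result = -result
        p += 1
    return -result if n > 1 else result

assert [_mobius_ref(k) for k in (1, 2, 3, 4, 6, 30)] == [1, -1, -1, 0, 1, -1]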
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , vocab_size=50257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50256 , eos_token_id=50256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 665 | 0 |
def binomial_coefficient( n , r ) -> int:
    """simple docstring"""
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
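# Sanity check of the Pascal's-triangle DP above against the standard library
# (math.comb requires Python 3.8+): C(10, 5) = 252.
import math

assert binomial_coefficient(n=10, r=5) == math.comb(10, 5) == 252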
| 87 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs , cs))
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
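# Quick illustration of the helper above: the symbol pairs of ("l", "o", "w")
# are exactly the candidate merges the BPE loop below scores against bpe_ranks.
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}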
class LongformerTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return len(self.encoder )

    def get_vocab( self ):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens
    def _convert_token_to_id( self , token ):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self , index ):
        '''simple docstring'''
        return self.decoder.get(index )

    def convert_tokens_to_string( self , tokens ):
        '''simple docstring'''
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]

    def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
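# Standalone walk-through of the merge loop inside bpe() with made-up ranks
# (hypothetical two-merge vocabulary; real ranks come from merges.txt):
_demo_ranks = {("l", "o"): 0, ("lo", "w"): 1}
_word = ("l", "o", "w")
while True:
    _pairs = get_pairs(_word)
    _bigram = min(_pairs, key=lambda p: _demo_ranks.get(p, float("inf")))
    if _bigram not in _demo_ranks:
        break
    _first, _second = _bigram
    _merged, _i = [], 0
    while _i < len(_word):
        if _i < len(_word) - 1 and (_word[_i], _word[_i + 1]) == (_first, _second):
            _merged.append(_first + _second)
            _i += 2
        else:
            _merged.append(_word[_i])
            _i += 1
    _word = tuple(_merged)
    if len(_word) == 1:
        break
assert _word == ("low",)  # "l o w" -> "lo w" -> "low"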
| 665 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = '''gelu'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1)
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config , input_ids , decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["""input_ids"""]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["""attention_mask"""][:1, :]
        head_mask = inputs_dict["""head_mask"""]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3)
def prepare_blenderbot_small_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids , config.pad_token_id ) , tf.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.int8 ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.int8 ),
            ] , axis=-1 , )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
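# Note on the default-mask rule above: tf.math.not_equal(input_ids, pad_token_id)
# yields 1 for every real token and 0 for padding, so with pad_token_id=1 the
# ids [[5, 6, 1]] produce attention_mask [[1, 1, 0]] (illustrative values only).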
@require_tf
class TFBlenderbotSmallModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            '''conversational''': TFBlenderbotSmallForConditionalGeneration,
            '''feature-extraction''': TFBlenderbotSmallModel,
            '''summarization''': TFBlenderbotSmallForConditionalGeneration,
            '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration,
            '''translation''': TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BlenderbotSmallConfig)

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmallModelIntegrationTests( unittest.TestCase ):
    src_text = [
        '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '''
        ''' i\'m going to throw up.\nand why is that?'''
    ]
    model_name = '''facebook/blenderbot_small-90M'''

    @cached_property
    def tokenizer( self ):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""")

    @cached_property
    def model( self ):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input( self ):
        model_inputs = self.tokenizer(self.src_text , return_tensors="""tf""")
        generated_ids = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=True , )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 88 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 665 | 0 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType( enum.Enum ):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2

@add_end_docstrings(PIPELINE_INIT_ARGS )
class TextGenerationPipeline( Pipeline ):
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix , **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters( self , return_full_text=None , return_tensors=None , return_text=None , return_type=None , clean_up_tokenization_spaces=None , prefix=None , handle_long_generation=None , stop_sequence=None , **generate_kwargs , ):
        """simple docstring"""
        preprocess_params = {}
        if prefix is not None:
            preprocess_params['prefix'] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix , padding=False , add_special_tokens=False , return_tensors=self.framework )
            generate_kwargs['prefix_length'] = prefix_inputs['input_ids'].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ' [None, \'hole\']')
            preprocess_params['handle_long_generation'] = handle_long_generation
        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params['return_type'] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params['clean_up_tokenization_spaces'] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.')
            generate_kwargs['eos_token_id'] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize( self , *args , **kwargs ):
        """simple docstring"""
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True})
        return super()._parse_and_tokenize(*args , **kwargs)

    def __call__( self , text_inputs , **kwargs ):
        """simple docstring"""
        return super().__call__(text_inputs , **kwargs)

    def preprocess( self , prompt_text , prefix="" , handle_long_generation=None , **generate_kwargs ):
        """simple docstring"""
        inputs = self.tokenizer(
            prefix + prompt_text , padding=False , add_special_tokens=False , return_tensors=self.framework)
        inputs['prompt_text'] = prompt_text
        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length' , self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected')
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length')
                inputs['input_ids'] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs['attention_mask'] = inputs['attention_mask'][:, -keep_length:]
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        """simple docstring"""
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask' , None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text')
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length' , 0)
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs['max_length'] = generate_kwargs.get('max_length') or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids , attention_mask=attention_mask , **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b , out_b // in_b , *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence , (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess( self , model_outputs , return_type=ReturnType.FULL_TEXT , clean_up_tokenization_spaces=True ):
        """simple docstring"""
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , ))
                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record)
        return records
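# Hedged usage sketch for the pipeline above via the transformers factory function
# (uses a tiny public test checkpoint and downloads it from the hub):
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation" , model="sshleifer/tiny-gpt2")
    print(generator("Hello," , max_new_tokens=5)[0]["generated_text"])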
| 89 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class BartTokenizerFast( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast into tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
    @property
    def mask_token( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token( self , value ):
        '''simple docstring'''
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus( self , *args , **kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary( self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
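# Standalone sketch of the BART pair layout built above: <s> A </s></s> B </s>
# (ids 0/2 are BART's usual bos/eos; the sequences are made up):
_bos, _eos = 0, 2
_a_ids, _b_ids = [10, 11], [20, 21]
_pair = [_bos] + _a_ids + [_eos] + [_eos] + _b_ids + [_eos]
assert _pair == [0, 10, 11, 2, 2, 20, 21, 2]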
| 665 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
__UpperCAmelCase = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
__UpperCAmelCase = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
__UpperCAmelCase = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds , labels ):
    return float((preds == labels).mean() )


def acc_and_f1(preds , labels ):
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds , labels ):
    pearson_corr = float(pearsonr(preds , labels )[0] )
    spearman_corr = float(spearmanr(preds , labels )[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
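# --- Added sanity check (not part of the original metric script) ---
# Illustrates the helpers above on a tiny hand-computed case; the numpy
# arrays mirror what `datasets` passes into `_compute`.
def _demo_glue_helpers():
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    assert simple_accuracy(preds, labels) == 0.75  # 3 of 4 correct
    scores = acc_and_f1(preds, labels)
    assert round(scores["f1"], 2) == 0.67  # precision 0.5, recall 1.0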
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue( datasets.Metric ):
    '''simple docstring'''
    def _info( self ) -> datasets.MetricInfo:
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task , reset_position_index_per_cell , tf_checkpoint_path , tapas_config_file , pytorch_dump_path ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'Task {task} not supported.')

    print(F'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model , config , tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
    parser.add_argument(
        '--reset_position_index_per_cell',
        default=False,
        action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
    )
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class ErnieMConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type: str = '''ernie_m'''
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    def __init__( self , vocab_size: int = 25_0002 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 514 , initializer_range: float = 0.02 , pad_token_id: int = 1 , layer_norm_eps: float = 1e-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
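# Added usage sketch (not part of the original module): the attribute_map
# above routes legacy names onto canonical config fields, e.g.
#     config = ErnieMConfig(hidden_size=384, num_hidden_layers=6)
#     config.num_classes = 5   # stored as config.num_labels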
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline( DiffusionPipeline ):
    '''simple docstring'''
    _optional_components = ["""vqvae"""]

    def __init__( self , vqvae: AutoencoderKL , unet: UNet2DConditionModel , mel: Mel , scheduler: Union[DDIMScheduler, DDPMScheduler] , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )

    def get_default_steps( self ) -> int:
        '''simple docstring'''
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , audio_file: str = None , raw_audio: np.ndarray = None , slice: int = 0 , start_step: int = 0 , steps: int = None , generator: torch.Generator = None , mask_start_secs: float = 0 , mask_end_secs: float = 0 , step_generator: torch.Generator = None , eta: float = 0 , noise: torch.Tensor = None , encoding: torch.Tensor = None , return_dict=True , ):
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNet2DConditionModel ):
                model_output = self.unet(images , t , encoding )["""sample"""]
            else:
                model_output = self.unet(images , t )["""sample"""]

            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )["""prev_sample"""]
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )["""prev_sample"""]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )["""sample"""]

        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 255).round().astype("""uint8""" )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )

        audios = [self.mel.image_to_audio(_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
@torch.no_grad()
    def encode( self , images: List[Image.Image] , steps: int = 50 ):
        '''simple docstring'''
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )["""sample"""]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
@staticmethod
    def slerp( xa: torch.Tensor , xb: torch.Tensor , alpha: float ) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
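# --- Added sanity check (a sketch, not part of the original pipeline) ---
# Spherical interpolation with two orthonormal vectors stays on the unit
# sphere, and the endpoints are recovered at alpha=0 and alpha=1.
def _demo_slerp():
    xa = torch.tensor([1.0, 0.0])
    xb = torch.tensor([0.0, 1.0])
    mid = AudioDiffusionPipeline.slerp(xa, xb, 0.5)
    assert torch.allclose(torch.norm(mid), torch.tensor(1.0), atol=1e-5)
    assert torch.allclose(AudioDiffusionPipeline.slerp(xa, xb, 0.0), xa, atol=1e-5)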
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""" , """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=True , max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""" , """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding="""longest""" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="""pt""" , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == """fp8""") , )

    return train_dataloader, eval_dataloader
def training_function(config , args ):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""" , """mrpc""")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric)
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""" , type=str , default=None , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""" , )
    parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
    args = parser.parse_args()
    config = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config , args)
if __name__ == "__main__":
main()
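# Typical ways to launch a script like this (an illustration; see the
# accelerate examples README linked above for the authoritative list):
#   python nlp_example.py                    # single CPU or GPU
#   accelerate launch nlp_example.py         # after running `accelerate config`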
"""simple docstring"""
class FlowNetwork:
    """simple docstring"""
    def __init__( self , graph , sources , sinks ):
        '''simple docstring'''
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources , sinks )
        self.verticies_count = len(graph )
        self.maximum_flow_algorithm = None

    def _normalize_graph( self , sources , sinks ):
        '''simple docstring'''
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]
        if len(sources ) == 0 or len(sinks ) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
        # make fake vertex if there are more
        # than one source or sink
        if len(sources ) > 1 or len(sinks ) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i] )
            size = len(self.graph ) + 1
            for room in self.graph:
                room.insert(0 , 0 )
            self.graph.insert(0 , [0] * size )
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph ) + 1
            for room in self.graph:
                room.append(0 )
            self.graph.append([0] * size )
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow( self ):
        '''simple docstring'''
        if self.maximum_flow_algorithm is None:
            raise Exception('You need to set maximum flow algorithm before.' )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm( self , algorithm ):
        '''simple docstring'''
        self.maximum_flow_algorithm = algorithm(self )
class FlowNetworkAlgorithmExecutor:
    """simple docstring"""
    def __init__( self , flow_network ):
        '''simple docstring'''
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute( self ):
        '''simple docstring'''
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm( self ):
        '''simple docstring'''
        pass
class MaximumFlowAlgorithmExecutor( FlowNetworkAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ):
        '''simple docstring'''
        super().__init__(flow_network )
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow( self ):
        '''simple docstring'''
        if not self.executed:
            raise Exception('You should execute algorithm before using its result!' )
        return self.maximum_flow
class PushRelabelExecutor( MaximumFlowAlgorithmExecutor ):
    """simple docstring"""
    def __init__( self , flow_network ):
        '''simple docstring'''
        super().__init__(flow_network )
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm( self ):
        '''simple docstring'''
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )

    def process_vertex( self , vertex_index ):
        '''simple docstring'''
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )

    def push( self , from_index , to_index ):
        '''simple docstring'''
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel( self , vertex_index ):
        '''simple docstring'''
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
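# Added note: for the 4-vertex graph above the only augmenting path is
# 0 -> 1 -> 2 -> 3 with bottleneck min(7, 6, 8) = 6, so the printed
# maximum flow should be 6.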
'''simple docstring'''
import functools
def mincost_tickets(days: list[int] , costs: list[int]) -> int:
    # Validation
    if not isinstance(days , list) or not all(isinstance(day , int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")

    if len(costs) != 3 or not all(isinstance(cost , int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")

    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )

    return dynamic_programming(1)
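# --- Added worked example (not part of the original module) ---
# With days=[1, 4, 6, 7, 8, 20] and costs=[2, 7, 15], the cheapest plan is a
# 1-day pass on day 1, a 7-day pass covering days 4-10, and a 1-day pass on
# day 20: 2 + 7 + 2 = 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11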
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
"""simple docstring"""
    def __init__( self , input_array: numpy.ndarray , output_array: numpy.ndarray ) -> None:
'''simple docstring'''
        self.input_array = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
# Real output values provided.
        self.output_array = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape )
    def feedforward( self ) -> numpy.ndarray:
'''simple docstring'''
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
    def back_propagation( self ) -> None:
'''simple docstring'''
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
    def train( self , output: numpy.ndarray , iterations: int , give_loss: bool ) -> None:
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
            self.output = self.feedforward()
self.back_propagation()
if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'Iteration {iteration} Loss: {loss}' )
    def predict( self , input_arr: numpy.ndarray ) -> int:
'''simple docstring'''
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def sigmoid(value: numpy.ndarray ) -> numpy.ndarray:
"""simple docstring"""
return 1 / (1 + numpy.exp(-value ))
def sigmoid_derivative(value: numpy.ndarray ) -> numpy.ndarray:
"""simple docstring"""
return (value) * (1 - (value))
def example() -> int:
"""simple docstring"""
    test_input = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
        ) , dtype=numpy.float64 , )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.float64 )
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input , output_array=output )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output , iterations=1_0 , give_loss=False )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.float64 ) )
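# --- Added numerical check (a sketch, not part of the original module) ---
# sigmoid_derivative expects the *activated* value s = sigmoid(x) and uses
# the identity d/dx sigmoid(x) = s * (1 - s); compare against a central
# finite difference to confirm.
def _check_sigmoid_derivative() -> None:
    x = numpy.linspace(-3, 3, 7)
    s = sigmoid(x)
    eps = 1e-6
    numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
    assert numpy.allclose(sigmoid_derivative(s), numeric, atol=1e-6)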
if __name__ == "__main__":
example()
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64] ,
    constant_matrix: NDArray[float64] ,
    init_val: list[int] ,
    iterations: int ,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = F'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)

    if cols2 != 1:
        msg = F'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True

    for i in range(0 , rows):
        total = 0
        for j in range(0 , cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")

    return is_diagonally_dominant
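# --- Added worked example (not part of the original module) ---
# The strictly diagonally dominant system
#     4x +  y +  z =  2
#      x + 5y + 2z = -9
#      x + 2y + 4z = -3
# has the exact solution (1, -2, 0); starting from (0, 0, 0), fifty Jacobi
# iterations get within about 1e-3 of it.
def _demo_jacobi() -> None:
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-9.0], [-3.0]])
    approx = jacobi_iteration_method(coefficient, constant, [0, 0, 0], 50)
    assert np.allclose(approx, [1.0, -2.0, 0.0], atol=1e-3)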
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp( self ) -> None:
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer( self ) -> None:
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
        self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
    def test_rust_and_python_full_tokenizers( self ) -> None:
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True )
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True )
        sequence = "UNwant\u00E9d,running"
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_chinese( self ) -> None:
        tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
    def test_basic_tokenizer_lower( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def test_basic_tokenizer_lower_strip_accents_false( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )

    def test_basic_tokenizer_lower_strip_accents_true( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def test_basic_tokenizer_lower_strip_accents_default( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )

    def test_basic_tokenizer_no_lower( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )

    def test_basic_tokenizer_no_lower_strip_accents_false( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )

    def test_basic_tokenizer_no_lower_strip_accents_true( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )

    def test_basic_tokenizer_respects_never_split_tokens( self ) -> None:
        tokenizer = BasicTokenizer(do_lower_case=False , never_split=["[UNK]"] )
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )

    def test_basic_tokenizer_splits_on_punctuation( self ) -> None:
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text ) , expected )
    def test_wordpiece_tokenizer( self ) -> None:
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens ):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
    def test_is_whitespace( self ) -> None:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
    def test_is_control( self ) -> None:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
    def test_is_punctuation( self ) -> None:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
    def test_clean_text( self ) -> None:
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
        self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
    @slow
    def test_sequence_builders( self ) -> None:
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased" )
        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_a = tokenizer.encode("multi-sequence build" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_a + [102]
    def test_offsets_with_special_characters( self ) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                sentence = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
                tokens = tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r , "do_lower_case" ) else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
    def test_change_tokenize_chinese_chars( self ) -> None:
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char )
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p , list_of_commun_chinese_char )
                self.assertListEqual(tokens_without_spe_char_r , list_of_commun_chinese_char )
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char , add_special_tokens=False )
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char , add_special_tokens=False )
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r )
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p )
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"""##{token}""" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char )
                ]
                self.assertListEqual(tokens_without_spe_char_p , expected_tokens )
                self.assertListEqual(tokens_without_spe_char_r , expected_tokens )
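# Added illustration (not part of the original test file): the wordpiece test
# above relies on greedy longest-match-first segmentation, e.g. with the small
# vocab {"un", "##want", "##ed", "runn", "##ing", "[UNK]"}:
#     WordpieceTokenizer(vocab=vocab, unk_token="[UNK]").tokenize("unwanted")
#         -> ["un", "##want", "##ed"]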
'''simple docstring'''
def abbr(a: str , b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
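# --- Added worked example (not part of the original module) ---
# "daBcd" can be abbreviated to "ABC": keep 'B', capitalize 'a' and 'c', and
# delete the remaining lowercase letters. "dBcd" cannot, since no 'a'/'A'
# precedes the 'B'.
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False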
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def a ( __UpperCAmelCase : str = "dhaka" , __UpperCAmelCase : int = 5 ) -> int:
__magic_name__: Tuple = min(__UpperCAmelCase , 5_0 ) # Prevent abuse!
__magic_name__: Tuple = {
"""q""": query,
"""tbm""": """isch""",
"""hl""": """en""",
"""ijn""": """0""",
}
__magic_name__: Optional[Any] = requests.get("""https://www.google.com/search""" , params=__UpperCAmelCase , headers=__UpperCAmelCase )
__magic_name__: Tuple = BeautifulSoup(html.text , """html.parser""" )
__magic_name__: str = """""".join(
re.findall(R"""AF_initDataCallback\(([^<]+)\);""" , str(soup.select("""script""" ) ) ) )
__magic_name__: List[Any] = json.dumps(__UpperCAmelCase )
__magic_name__: int = json.loads(__UpperCAmelCase )
__magic_name__: Optional[Any] = re.findall(
R"""\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",""" , __UpperCAmelCase , )
if not matched_google_image_data:
return 0
__magic_name__: int = re.sub(
R"""\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]""" , """""" , str(__UpperCAmelCase ) , )
__magic_name__: Tuple = re.findall(
R"""(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]""" , __UpperCAmelCase , )
for index, fixed_full_res_image in enumerate(__UpperCAmelCase ):
if index >= max_images:
return index
__magic_name__: List[str] = bytes(__UpperCAmelCase , """ascii""" ).decode(
"""unicode-escape""" )
__magic_name__: Any = bytes(__UpperCAmelCase , """ascii""" ).decode(
"""unicode-escape""" )
__magic_name__: Any = urllib.request.build_opener()
__magic_name__: Union[str, Any] = [
(
"""User-Agent""",
"""Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"""
""" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582""",
)
]
urllib.request.install_opener(__UpperCAmelCase )
__magic_name__: Optional[int] = f'query_{query.replace(" " , "_" )}'
if not os.path.exists(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
urllib.request.urlretrieve( # noqa: S310
__UpperCAmelCase , f'{path_name}/original_size_img_{index}.jpg' )
return index
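# Example invocation (an illustration; requires network access):
#   python download_images_from_google_query.py "blue car"
# downloads up to five full-resolution results into ./query_blue_car/.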
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    '''simple docstring'''
    destination_vertex: int
    weight: int
class AdjacencyList:
    '''simple docstring'''
    def __init__( self , size: int ):
        '''simple docstring'''
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size

    def __getitem__( self , vertex: int ) -> Iterator[Edge]:
        '''simple docstring'''
        return iter(self._graph[vertex] )

    @property
    def size( self ):
        '''simple docstring'''
        return self._size

    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""" )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""" )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )

    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int:
        '''simple docstring'''
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""" )
        return distances[finish_vertex]
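# --- Added usage sketch (not part of the original module) ---
# 0-1 BFS: zero-weight edges go to the front of the deque, so each vertex is
# settled with its minimal distance without needing a priority queue.
def _demo_zero_one_bfs() -> None:
    g = AdjacencyList(3)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 1)
    assert g.get_shortest_path(0, 2) == 1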
if __name__ == "__main__":
import doctest
doctest.testmod()
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a , model_b , did_step , iteration ):
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), F'''Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'''
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), F'''Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'''
def step_model(model , input , target , accelerator , do_backward=True ):
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup(accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1e-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1e-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.6_5 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.6_5 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
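# --- Added sketch (not part of the original test module) ---
# The tests below rely on Accelerator.no_sync: inside the context manager the
# DDP gradient all-reduce is skipped, so each process accumulates purely local
# gradients; the next backward pass outside the context synchronizes again.
# The minimal shape of that pattern:
def _no_sync_pattern(accelerator, model, batches):
    for i, (x, y) in enumerate(batches):
        if i % 2 == 0:
            with accelerator.no_sync(model):  # accumulate locally, no all-reduce
                step_model(model, x, y, accelerator)
        else:
            step_model(model, x, y, accelerator)  # grads sync here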
def test_noop_sync(accelerator ):
    '''simple docstring'''
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def a ( snake_case__: Optional[Any] ):
'''simple docstring'''
# Test on distributed setup that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ = get_training_setup(snake_case__ )
# Use a single batch
lowercase_ , lowercase_ = next(iter(snake_case__ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
# Sync grads
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase_ = ddp_input[torch.randperm(len(snake_case__ ) )]
def a ( snake_case__: int=False , snake_case__: Union[str, Any]=False ):
'''simple docstring'''
lowercase_ = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ = get_training_setup(snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
lowercase_ , lowercase_ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# DDP model and model grads should only be in sync after every second batch or on the final batch
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(snake_case__ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'''Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'''
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'''Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'''
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
lowercase_ = ddp_input[torch.randperm(len(snake_case__ ) )]
GradientState._reset_state()
def a ( snake_case__: Optional[int]=False , snake_case__: List[str]=False ):
'''simple docstring'''
lowercase_ = Accelerator(
split_batches=snake_case__ , dispatch_batches=snake_case__ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ = get_training_setup(snake_case__ , snake_case__ )
for iteration, batch in enumerate(snake_case__ ):
lowercase_ , lowercase_ = batch.values()
# Gather the distributed inputs and targs for the base model
lowercase_ , lowercase_ = accelerator.gather((ddp_input, ddp_target) )
lowercase_ , lowercase_ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(snake_case__ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(snake_case__ ):
step_model(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'''Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n'''
lowercase_ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(snake_case__ ))
if accelerator.num_processes > 1:
check_model_parameters(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def a ( ):
'''simple docstring'''
lowercase_ = Accelerator()
lowercase_ = RegressionDataset(length=80 )
lowercase_ = DataLoader(snake_case__ , batch_size=16 )
lowercase_ = RegressionDataset(length=96 )
lowercase_ = DataLoader(snake_case__ , batch_size=16 )
lowercase_ , lowercase_ = accelerator.prepare(snake_case__ , snake_case__ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if iteration < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(snake_case__ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(snake_case__ )
if batch_num < len(snake_case__ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def a ( ):
'''simple docstring'''
lowercase_ = Accelerator()
lowercase_ = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
test_noop_sync(snake_case__ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
test_distributed_sync(snake_case__ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation(snake_case__ , snake_case__ )
# Currently breaks on torch 2.0+; the cause still needs investigation
if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'''`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**''' , )
test_gradient_accumulation_with_opt_and_scheduler(snake_case__ , snake_case__ )
def a ( snake_case__: List[str] ):
'''simple docstring'''
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
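# Hedged illustration (added; not part of the test script above): the minimal
# gradient-accumulation loop that `test_gradient_accumulation` verifies.
# `model`, `optimizer`, `dataloader` and `loss_fn` are caller-supplied
# placeholders, not names taken from the tests.
def _example_accumulation_loop(model, optimizer, dataloader, loss_fn):
    from accelerate import Accelerator

    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for inputs, targets in dataloader:
        # Inside `accumulate`, gradient sync is skipped until an optimizer step is due.
        with accelerator.accumulate(model):
            loss = loss_fn(model(inputs), targets)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()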
| 97 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
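# Hedged cross-check (added): a brute force over almost-equilateral triangles
# (sides a, a, a +/- 1) with integral area, which is what the recurrence above
# appears to compute (Project Euler 94). Only practical for small limits.
def solution_brute_force(max_perimeter: int = 10**5) -> int:
    import math

    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            perimeter = 2 * a + c
            if perimeter > max_perimeter:
                continue
            # Heron's formula: 16 * area^2 = p * (p - 2a)^2 * (p - 2c) for sides (a, a, c).
            area_sq_16 = perimeter * (perimeter - 2 * a) ** 2 * (perimeter - 2 * c)
            if area_sq_16 <= 0:  # degenerate triangle
                continue
            root = math.isqrt(area_sq_16)
            if root * root == area_sq_16 and root % 4 == 0:  # root == 4 * area, so area is an integer
                total += perimeter
    return total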
| 665 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=__magic_name__ )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_snake_case : ClassVar[Features] = Features({'audio': Audio()} )
_snake_case : ClassVar[Features] = Features({'labels': ClassLabel} )
_snake_case : str = "audio"
_snake_case : str = "labels"
def snake_case__ ( self : Dict , lowerCAmelCase__ : List[str] ) -> Any:
'''simple docstring'''
if self.label_column not in features:
raise ValueError(f"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , lowerCAmelCase__ ):
raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
_UpperCamelCase = copy.deepcopy(self )
_UpperCamelCase = self.label_schema.copy()
_UpperCamelCase = features[self.label_column]
_UpperCamelCase = label_schema
return task_template
@property
def snake_case__ ( self : int ) -> Dict[str, str]:
'''simple docstring'''
return {
self.audio_column: "audio",
self.label_column: "labels",
}
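# Hedged usage sketch (added): applying a task template like the one above.
# The dataset id is hypothetical; `prepare_for_task` renames/casts the dataset
# columns to match the template's schema.
def _example_prepare_for_audio_classification():
    from datasets import load_dataset
    from datasets.tasks import AudioClassification

    ds = load_dataset("some-org/some-audio-dataset", split="train")  # hypothetical id
    task = AudioClassification(audio_column="audio", label_column="labels")
    return ds.prepare_for_task(task)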
| 98 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
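# Hedged illustration (added): the subcommand-dispatch pattern the CLI above
# relies on: each sub-parser sets a `func` default that the entry point calls.
# The command and option names here are made up for the example.
def _example_subcommand_cli():
    from argparse import ArgumentParser

    def greet_command(args):
        print(f"hello, {args.name}")

    parser = ArgumentParser("demo", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    greet_parser = subparsers.add_parser("greet")
    greet_parser.add_argument("--name", default="world")
    greet_parser.set_defaults(func=greet_command)

    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        raise SystemExit(1)
    args.func(args)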
| 665 | 0 |
def a (lowerCAmelCase__ , lowerCAmelCase__ ):
__a = len(lowerCAmelCase__ )
__a = []
for i in range(len(lowerCAmelCase__ ) - pat_len + 1 ):
__a = True
for j in range(lowerCAmelCase__ ):
if s[i + j] != pattern[j]:
__a = False
break
if match_found:
position.append(lowerCAmelCase__ )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
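# Hedged alternative (added): Knuth-Morris-Pratt runs in O(len(s) + len(pattern))
# worst case, versus O(len(s) * len(pattern)) for the naive scan above.
# Assumes a non-empty pattern (empty patterns return no matches here).
def kmp_pattern_search(s, pattern):
    if not pattern:
        return []
    # Failure function: length of the longest proper prefix of pattern[: i + 1]
    # that is also a suffix.
    fail = [0] * len(pattern)
    k = 0
    for i in range(1, len(pattern)):
        while k and pattern[i] != pattern[k]:
            k = fail[k - 1]
        if pattern[i] == pattern[k]:
            k += 1
        fail[i] = k
    # Scan the text, reusing the failure function on mismatches.
    position, k = [], 0
    for i, ch in enumerate(s):
        while k and ch != pattern[k]:
            k = fail[k - 1]
        if ch == pattern[k]:
            k += 1
        if k == len(pattern):
            position.append(i - k + 1)
            k = fail[k - 1]
    return position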
| 99 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
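# Hedged sketch (added) of the idea behind `_LazyModule`: defer submodule
# imports until an attribute is first accessed, shown here with module-level
# __getattr__ (PEP 562). This is a simplification for illustration, not the
# actual transformers implementation, and the mapping below is illustrative.
import importlib

_LAZY_ATTRS = {"AltCLIPConfig": ".configuration_altclip", "AltCLIPModel": ".modeling_altclip"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")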
| 665 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , ) -> int:
SCREAMING_SNAKE_CASE__ = {}
if train_file is not None:
SCREAMING_SNAKE_CASE__ = [train_file]
if eval_file is not None:
SCREAMING_SNAKE_CASE__ = [eval_file]
if test_file is not None:
SCREAMING_SNAKE_CASE__ = [test_file]
SCREAMING_SNAKE_CASE__ = datasets.load_dataset('''csv''' , data_files=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = list(ds[list(files.keys() )[0]].features.keys() )
SCREAMING_SNAKE_CASE__ = features_name.pop(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE__ = list(set(ds[list(files.keys() )[0]][label_name] ) )
SCREAMING_SNAKE_CASE__ = {label: i for i, label in enumerate(lowerCAmelCase_ )}
SCREAMING_SNAKE_CASE__ = tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ = {}
if len(lowerCAmelCase_ ) == 1:
for k in files.keys():
SCREAMING_SNAKE_CASE__ = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' ) , batched=lowerCAmelCase_ , )
elif len(lowerCAmelCase_ ) == 2:
for k in files.keys():
SCREAMING_SNAKE_CASE__ = ds[k].map(
lambda example : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , padding='''max_length''' , ) , batched=lowerCAmelCase_ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
SCREAMING_SNAKE_CASE__ = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
SCREAMING_SNAKE_CASE__ = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
SCREAMING_SNAKE_CASE__ = {k: v for k, v in ex.items() if k in input_names}
SCREAMING_SNAKE_CASE__ = labelaid[ex[label_name]]
yield (d, label)
SCREAMING_SNAKE_CASE__ = (
tf.data.Dataset.from_generator(
lowerCAmelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
SCREAMING_SNAKE_CASE__ = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
SCREAMING_SNAKE_CASE__ = (
tf.data.Dataset.from_generator(
lowerCAmelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
SCREAMING_SNAKE_CASE__ = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
SCREAMING_SNAKE_CASE__ = (
tf.data.Dataset.from_generator(
lowerCAmelCase_ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
SCREAMING_SNAKE_CASE__ = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
_A : Any = logging.getLogger(__name__)
@dataclass
class __snake_case :
'''simple docstring'''
lowerCamelCase__ : int = field(metadata={"""help""": """Which column contains the label"""} )
lowerCamelCase__ : str = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The path of the training file"""} )
lowerCamelCase__ : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The path of the development file"""} )
lowerCamelCase__ : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The path of the test file"""} )
lowerCamelCase__ : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowerCamelCase__ : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class __snake_case :
'''simple docstring'''
lowerCamelCase__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowerCamelCase__ : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowerCamelCase__ : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowerCamelCase__ : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Set this flag to use fast tokenization."""} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowerCamelCase__ : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def __snake_case ( ) -> List[Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowerCAmelCase_ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowerCAmelCase_ ) , labelaid=lowerCAmelCase_ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
SCREAMING_SNAKE_CASE__ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , )
def compute_metrics(lowerCAmelCase_ ) -> Dict:
SCREAMING_SNAKE_CASE__ = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
SCREAMING_SNAKE_CASE__ = TFTrainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=lowerCAmelCase_ , eval_dataset=lowerCAmelCase_ , compute_metrics=lowerCAmelCase_ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
SCREAMING_SNAKE_CASE__ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
SCREAMING_SNAKE_CASE__ = trainer.evaluate()
SCREAMING_SNAKE_CASE__ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(lowerCAmelCase_ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(lowerCAmelCase_ )
return results
if __name__ == "__main__":
main()
| 100 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
from __future__ import annotations
def a__ ( A__, A__ = None ):
SCREAMING_SNAKE_CASE_ : str = word_bank or []
# create a table
SCREAMING_SNAKE_CASE_ : int = len(A__ ) + 1
SCREAMING_SNAKE_CASE_ : list[list[list[str]]] = []
for _ in range(A__ ):
table.append([] )
# seed value
SCREAMING_SNAKE_CASE_ : Any = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(A__ ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(A__ )] == word:
SCREAMING_SNAKE_CASE_ : list[list[str]] = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(A__ )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(A__ )]:
combination.reverse()
return table[len(A__ )]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
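# Hedged top-down counterpart (added) to the table-based version above:
# memoized recursion over suffixes of `target`, producing the same
# decompositions. Assumes all words in `word_bank` are non-empty.
def all_construct_memo(target, word_bank):
    from functools import lru_cache

    words = tuple(word_bank)

    @lru_cache(maxsize=None)
    def ways_for(suffix):
        if suffix == "":
            return [[]]  # one way to build the empty string: use no words
        ways = []
        for word in words:
            if suffix.startswith(word):
                for way in ways_for(suffix[len(word):]):
                    ways.append([word, *way])
        return ways

    return ways_for(target)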
| 101 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
"""simple docstring"""
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
__magic_name__ : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class lowercase__ ( datasets.BuilderConfig ):
"""simple docstring"""
__lowerCAmelCase : Optional[datasets.Features] = None
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ):
import pyspark
def generate_fn():
UpperCamelCase : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
UpperCamelCase : List[str] = df_with_partition_id.select("""*""" ).where(f"""part_id = {partition_id}""" ).drop("""part_id""" )
UpperCamelCase : Optional[Any] = partition_df.collect()
UpperCamelCase : Union[str, Any] = 0
for row in rows:
yield f"""{partition_id}_{row_id}""", row.asDict()
row_id += 1
return generate_fn
class lowercase__ ( _BaseExamplesIterable ):
"""simple docstring"""
def __init__( self , _A , _A=None , ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = df
UpperCamelCase : Tuple = partition_order or range(self.df.rdd.getNumPartitions() )
UpperCamelCase : Union[str, Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self ):
'''simple docstring'''
yield from self.generate_examples_fn()
def _a ( self , _A ):
'''simple docstring'''
UpperCamelCase : int = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(_A )
return SparkExamplesIterable(self.df , partition_order=_A )
def _a ( self , _A , _A ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.split_shard_indices_by_worker(_A , _A )
return SparkExamplesIterable(self.df , partition_order=_A )
@property
def _a ( self ):
'''simple docstring'''
return len(self.partition_order )
class lowercase__ ( datasets.DatasetBuilder ):
"""simple docstring"""
__lowerCAmelCase : Dict = SparkConfig
def __init__( self , _A , _A = None , _A = None , **_A , ):
'''simple docstring'''
import pyspark
UpperCamelCase : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate()
UpperCamelCase : List[str] = df
UpperCamelCase : Optional[Any] = working_dir
super().__init__(
cache_dir=_A , config_name=str(self.df.semanticHash() ) , **_A , )
def _a ( self ):
'''simple docstring'''
def create_cache_and_write_probe(_A ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=_A )
UpperCamelCase : List[Any] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(_A , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
UpperCamelCase : Tuple = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(_A ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def _a ( self ):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features )
def _a ( self , _A ):
'''simple docstring'''
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def _a ( self , _A ):
'''simple docstring'''
import pyspark
def get_arrow_batch_size(_A ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
UpperCamelCase : Tuple = self.df.count()
UpperCamelCase : Union[str, Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
UpperCamelCase : List[Any] = (
self.df.limit(_A )
.repartition(1 )
.mapInArrow(_A , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
UpperCamelCase : str = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
UpperCamelCase : int = min(_A , int(approx_total_size / max_shard_size ) )
UpperCamelCase : Optional[Any] = self.df.repartition(_A )
def _a ( self , _A , _A , _A , ):
'''simple docstring'''
import pyspark
UpperCamelCase : List[Any] = ParquetWriter if file_format == """parquet""" else ArrowWriter
UpperCamelCase : Dict = os.path.join(self._working_dir , os.path.basename(_A ) ) if self._working_dir else fpath
UpperCamelCase : List[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
UpperCamelCase : Optional[int] = self.config.features
UpperCamelCase : Optional[Any] = self._writer_batch_size
UpperCamelCase : Union[str, Any] = self._fs.storage_options
def write_arrow(_A ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
UpperCamelCase : Optional[int] = pyspark.TaskContext().taskAttemptId()
UpperCamelCase : str = next(_A , _A )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
UpperCamelCase : str = 0
UpperCamelCase : Union[str, Any] = writer_class(
features=_A , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
UpperCamelCase : List[str] = pa.Table.from_batches([first_batch] )
writer.write_table(_A )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
UpperCamelCase , UpperCamelCase : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
UpperCamelCase : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , writer_batch_size=_A , storage_options=_A , embed_local_files=_A , )
UpperCamelCase : Dict = pa.Table.from_batches([batch] )
writer.write_table(_A )
if writer._num_bytes > 0:
UpperCamelCase , UpperCamelCase : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(_A ) ):
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(_A ) , os.path.basename(_A ) )
shutil.move(_A , _A )
UpperCamelCase : int = (
self.df.mapInArrow(_A , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def _a ( self , _A , _A = "arrow" , _A = None , _A = None , **_A , ):
'''simple docstring'''
self._validate_cache_dir()
UpperCamelCase : Dict = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(_A )
UpperCamelCase : Optional[int] = not is_remote_filesystem(self._fs )
UpperCamelCase : Dict = os.path.join if is_local else posixpath.join
UpperCamelCase : Optional[int] = """-TTTTT-SSSSS-of-NNNNN"""
UpperCamelCase : Dict = f"""{self.name}-{split_generator.name}{SUFFIX}.{file_format}"""
UpperCamelCase : Any = path_join(self._output_dir , _A )
UpperCamelCase : Any = 0
UpperCamelCase : List[str] = 0
UpperCamelCase : str = 0
UpperCamelCase : Union[str, Any] = []
UpperCamelCase : str = []
for task_id, content in self._prepare_split_single(_A , _A , _A ):
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = content  # (num_examples, num_bytes, num_shards, shard_lengths)
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(_A )
UpperCamelCase : str = total_num_examples
UpperCamelCase : Tuple = total_num_bytes
# should rename everything at the end
logger.debug(f"""Renaming {total_shards} shards.""" )
if total_shards > 1:
UpperCamelCase : List[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
UpperCamelCase : List[Any] = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
_A , _A , _A , ):
rename(
_A , fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace("""TTTTT-SSSSS""" , f"""{global_shard_id:05d}""" ).replace("""NNNNN""" , f"""{total_shards:05d}""" ) , )
UpperCamelCase : str = []
UpperCamelCase : List[Any] = 0
for i in range(len(_A ) ):
UpperCamelCase , UpperCamelCase : Dict = task_id_and_num_shards[i]
for shard_id in range(_A ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(_A , len(_A ) ).map(lambda _A : _rename_shard(*_A ) ).collect()
else:
# don't use any pattern
UpperCamelCase : int = 0
UpperCamelCase : Any = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f"""{shard_id:05d}""" ).replace("""TTTTT""" , f"""{task_id:05d}""" ) , fpath.replace(_A , """""" ) , )
def _a ( self , _A , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
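# Hedged usage sketch (added) for the builder above: the public entry point is
# `Dataset.from_spark`. The DataFrame contents and cache path are illustrative;
# on a multi-node cluster `cache_dir` must be reachable by driver and workers
# (see `_validate_cache_dir`).
def _example_dataset_from_spark():
    from datasets import Dataset
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.getOrCreate()
    df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
    return Dataset.from_spark(df, cache_dir="/tmp/spark_hf_cache")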
| 102 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = [0] * len(lowerCamelCase)
A_ : Union[str, Any] = []
A_ : Union[str, Any] = []
A_ : Tuple = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase)):
if indegree[i] == 0:
queue.append(lowerCamelCase)
while queue:
A_ : Any = queue.pop(0)
cnt += 1
topo.append(lowerCamelCase)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase)
if cnt != len(lowerCamelCase):
print("""Cycle exists""")
else:
print(lowerCamelCase)
# Adjacency List of Graph
__magic_name__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
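# Hedged variant (added): the same Kahn's algorithm with a `collections.deque`,
# so each dequeue is O(1) instead of the O(n) `list.pop(0)` above, and with the
# ordering returned instead of printed.
def topological_sort_deque(graph):
    from collections import deque

    indegree = {vertex: 0 for vertex in graph}
    for neighbours in graph.values():
        for neighbour in neighbours:
            indegree[neighbour] += 1
    queue = deque(vertex for vertex, degree in indegree.items() if degree == 0)
    order = []
    while queue:
        vertex = queue.popleft()
        order.append(vertex)
        for neighbour in graph[vertex]:
            indegree[neighbour] -= 1
            if indegree[neighbour] == 0:
                queue.append(neighbour)
    if len(order) != len(graph):
        raise ValueError("Cycle exists")
    return order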
| 665 | 0 |
"""simple docstring"""
snake_case = [
'''DownloadConfig''',
'''DownloadManager''',
'''DownloadMode''',
'''StreamingDownloadManager''',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 103 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ , A_ , A_ , A_ = config_and_inputs  # (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
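# Hedged sketch (added) of the RoPE-scaling configuration exercised by the
# parameterized test above. The tiny model dimensions are illustrative values,
# not taken from the tests; the factor mirrors the test's 10.0.
def _example_rope_scaled_llama():
    from transformers import LlamaConfig, LlamaModel

    config = LlamaConfig(
        vocab_size=128,
        hidden_size=64,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=128,
        rope_scaling={"type": "linear", "factor": 10.0},  # or {"type": "dynamic", "factor": ...}
    )
    return LlamaModel(config)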
| 665 | 0 |
"""simple docstring"""
import math
def _lowerCamelCase ( UpperCAmelCase_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5, int(math.sqrt(UpperCAmelCase_ ) + 1 ), 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowerCamelCase ( UpperCAmelCase_ : int = 10001 ) -> int:
"""simple docstring"""
try:
A__ = int(UpperCAmelCase_ )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
A__ = []
A__ = 2
while len(UpperCAmelCase_ ) < nth:
if is_prime(UpperCAmelCase_ ):
primes.append(UpperCAmelCase_ )
num += 1
else:
num += 1
return primes[len(UpperCAmelCase_ ) - 1]
if __name__ == "__main__":
print(f'{solution() = }')
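# Hedged alternative (added): sieve all primes up to an upper bound on the nth
# prime (Rosser's theorem, valid for nth >= 6) instead of trial-dividing each
# candidate as above.
def solution_sieve(nth: int = 10001) -> int:
    import math

    if nth < 1:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    limit = 15 if nth < 6 else int(nth * (math.log(nth) + math.log(math.log(nth)))) + 1
    is_prime = bytearray([1]) * (limit + 1)
    is_prime[0] = is_prime[1] = 0
    for i in range(2, math.isqrt(limit) + 1):
        if is_prime[i]:
            is_prime[i * i :: i] = bytearray(len(range(i * i, limit + 1, i)))
    count = 0
    for number in range(2, limit + 1):
        if is_prime[number]:
            count += 1
            if count == nth:
                return number
    raise AssertionError("upper bound on the nth prime was too small")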
| 104 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
    def _compute ( self ,predictions ,references ):
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions ,references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i ,j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
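# Added usage sketch (comments only; assumes this file is loadable as the
# ``competition_math`` metric and that ``math_equivalence`` is installed):
#
#   import datasets
#   metric = datasets.load_metric("competition_math")
#   result = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
#   assert result["accuracy"] == 1.0   # "1/2" canonicalizes to "\frac{1}{2}"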
| 665 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data : dict ) -> tuple:
    """simple docstring"""
    return (data["data"], data["target"])
def xgboost( features : np.ndarray , target : np.ndarray , test_features : np.ndarray ) -> np.ndarray:
    """simple docstring"""
    xgb = XGBRegressor(verbosity=0 , random_state=42 )
    xgb.fit(features , target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) , 1 )
    return predictions
def main( ) -> None:
    """simple docstring"""
    housing = fetch_california_housing()
    data, target = data_handling(housing )
    x_train, x_test, y_train, y_test = train_test_split(
        data , target , test_size=0.25 , random_state=1 )
    predictions = xgboost(x_train , y_train , x_test )
    # Error printing
    print(F'Mean Absolute Error : {mean_absolute_error(y_test , predictions )}' )
    print(F'Mean Square Error : {mean_squared_error(y_test , predictions )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
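# Added sketch (hypothetical synthetic data, not part of the original script):
# exercises the same fit/predict/reshape path as ``xgboost`` above without
# downloading the California housing dataset.
def _smoke_test_regressor() -> None:
    rng = np.random.default_rng(seed=0)
    features = rng.normal(size=(32, 4))
    target = features @ np.array([1.0, -2.0, 0.5, 0.0])
    predictions = xgboost(features[:24], target[:24], features[24:])
    assert predictions.shape == (8, 1)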
| 105 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
    def __init__( self : int ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=8 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,share_encoders=True ,projection_dim=128 ,pad_token_id=0 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
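# Added usage sketch (comments only; assumes this class is the RetriBertConfig
# exported by transformers):
#
#   from transformers import RetriBertConfig, RetriBertModel
#   config = RetriBertConfig(projection_dim=128, share_encoders=True)
#   model = RetriBertModel(config)   # query/doc encoders + projection head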
| 665 | 0 |
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : Optional[Any] = StableDiffusionControlNetImgaImgPipeline
A_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
A_ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'control_image'} )
A_ : str = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
A = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
torch.manual_seed(0 )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(__UpperCamelCase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCamelCase ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any]=0 ) -> Union[str, Any]:
if str(__UpperCamelCase ).startswith('mps' ):
A = torch.manual_seed(__UpperCamelCase )
else:
A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
A = 2
A = randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__UpperCamelCase , device=torch.device(__UpperCamelCase ) , )
A = floats_tensor(control_image.shape , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __UpperCamelCase ( self : List[str] ) -> Tuple:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : str = StableDiffusionControlNetImgaImgPipeline
A_ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
A_ : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
A_ : Dict = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
torch.manual_seed(0 )
def init_weights(__UpperCamelCase : Dict ):
if isinstance(__UpperCamelCase , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
A = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__UpperCamelCase )
torch.manual_seed(0 )
A = ControlNetModel(
block_out_channels=(32, 64) , layers_per_block=2 , in_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , cross_attention_dim=32 , conditioning_embedding_out_channels=(16, 32) , )
controlneta.controlnet_down_blocks.apply(__UpperCamelCase )
torch.manual_seed(0 )
A = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(__UpperCamelCase )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = MultiControlNetModel([controlneta, controlneta] )
A = {
'unet': unet,
'controlnet': controlnet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int]=0 ) -> Optional[Any]:
if str(__UpperCamelCase ).startswith('mps' ):
A = torch.manual_seed(__UpperCamelCase )
else:
A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
A = 2
A = [
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__UpperCamelCase , device=torch.device(__UpperCamelCase ) , ),
randn_tensor(
(1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) , generator=__UpperCamelCase , device=torch.device(__UpperCamelCase ) , ),
]
A = floats_tensor(control_image[0].shape , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase )
A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
'image': image,
'control_image': control_image,
}
return inputs
def __UpperCamelCase ( self : Any ) -> List[Any]:
A = self.get_dummy_components()
A = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
A = 1_0.0
A = 4
A = self.get_dummy_inputs(__UpperCamelCase )
A = steps
A = scale
A = pipe(**__UpperCamelCase )[0]
A = self.get_dummy_inputs(__UpperCamelCase )
A = steps
A = scale
A = pipe(**__UpperCamelCase , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
A = self.get_dummy_inputs(__UpperCamelCase )
A = steps
A = scale
A = pipe(**__UpperCamelCase , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
A = self.get_dummy_inputs(__UpperCamelCase )
A = steps
A = scale
A = pipe(**__UpperCamelCase , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
assert np.sum(np.abs(output_a - output_a ) ) > 1e-3
def __UpperCamelCase ( self : Dict ) -> Dict:
return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3 )
def __UpperCamelCase ( self : Any ) -> List[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2e-3 )
def __UpperCamelCase ( self : Optional[int] ) -> int:
A = self.get_dummy_components()
A = self.pipeline_class(**__UpperCamelCase )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(__UpperCamelCase )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : List[Any] ) -> str:
A = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny' )
A = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , safety_checker=__UpperCamelCase , controlnet=__UpperCamelCase )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = torch.Generator(device='cpu' ).manual_seed(0 )
A = 'evil space-punk bird'
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ).resize((512, 512) )
A = load_image(
'https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png' ).resize((512, 512) )
A = pipe(
__UpperCamelCase , __UpperCamelCase , control_image=__UpperCamelCase , generator=__UpperCamelCase , output_type='np' , num_inference_steps=50 , strength=0.6 , )
A = output.images[0]
assert image.shape == (512, 512, 3)
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy' )
        assert np.abs(expected_image - image ).max() < 9e-2
| 106 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
        # Mask token behaves like a normal word, i.e. includes the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
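# Added sketch (comments only; assumes this class corresponds to transformers'
# BigBirdTokenizer). A single sequence is wrapped as [CLS] A [SEP], a pair as
# [CLS] A [SEP] B [SEP], and the second segment gets token_type_id 1:
#
#   build_inputs_with_special_tokens([10, 11], [12])
#   # -> [cls_id, 10, 11, sep_id, 12, sep_id]
#   create_token_type_ids_from_sequences([10, 11], [12])
#   # -> [0, 0, 0, 0, 1, 1]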
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_output_embeds_base_model ( self : List[str] ) -> Any:
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]], dtype=tf.intaa, )  # "J'aime le camembert !" ("I love camembert!")
        output = model(input_ids )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape, expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]], dtype=tf.floataa, )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4 ) )
| 107 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
        # These are encoder-decoder models, so they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
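# Added usage sketch (comments only; assumes the standard transformers API):
#
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="t5-small")
#   generator("translate English to German: How old are you?")
#   # -> [{'generated_text': 'Wie alt sind Sie?'}]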
| 665 | 0 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer :
    '''simple docstring'''
    def put ( self , value ):
        """simple docstring"""
        raise NotImplementedError()
    def end ( self ):
        """simple docstring"""
        raise NotImplementedError()
class TextStreamer ( BaseStreamer ):
    '''simple docstring'''
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , **decode_kwargs ):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put ( self , value ):
        """simple docstring"""
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decode the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end ( self ):
        """simple docstring"""
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text ( self , text : str , stream_end : bool = False ):
        """simple docstring"""
        print(text , flush=True , end="""""" if not stream_end else None )
    def _is_chinese_char ( self , cp ):
"""simple docstring"""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
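# Added usage sketch (comments only; assumes these classes match transformers'
# streamers). TextStreamer prints tokens to stdout as generate() produces them:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   model.generate(**inputs, streamer=TextStreamer(tok), max_new_tokens=20)
#
# TextIteratorStreamer below does the same through a Queue, so generation can
# run in a background Thread while the caller iterates over decoded text.
# The CJK check above flushes per character because Chinese text has no spaces
# to split on, e.g. ord("中") = 0x4E2D falls inside the 0x4E00-0x9FFF block.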
class TextIteratorStreamer ( TextStreamer ):
    '''simple docstring'''
    def __init__( self , tokenizer : "AutoTokenizer" , skip_prompt : bool = False , timeout : Optional[float] = None , **decode_kwargs ):
        """simple docstring"""
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text ( self , text : str , stream_end : bool = False ):
        """simple docstring"""
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return self
    def __next__ ( self ):
        """simple docstring"""
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 108 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self : Optional[int] ,vocab_size=50257 ,n_positions=1024 ,n_embd=768 ,n_layer=12 ,n_head=12 ,n_inner=None ,activation_function="gelu_pytorch_tanh" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=50256 ,eos_token_id=50256 ,attention_softmax_in_fp32=True ,scale_attention_softmax_in_fp32=True ,multi_query=True ,**kwargs ,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
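# Added usage sketch (comments only; assumes this class is transformers'
# GPTBigCodeConfig). multi_query=True selects the SantaCoder-style attention
# with a single shared key/value head instead of one per attention head:
#
#   from transformers import GPTBigCodeConfig, GPTBigCodeModel
#   config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128, multi_query=True)
#   model = GPTBigCodeModel(config)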
| 665 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path ( suffix="" ) -> str:
    '''simple docstring'''
    directory = tempfile.mkdtemp()
    return os.path.join(directory , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.rand(12 ,dtype=torch.floataa ) - 0.5
__SCREAMING_SNAKE_CASE = AgentAudio(lowerCamelCase )
__SCREAMING_SNAKE_CASE = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase ,agent_type.to_raw() ,atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase ) )
# Ensure that the file contains the same value as the original tensor
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sf.read(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase ,torch.tensor(lowerCamelCase ) ,atol=1E-4 ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.rand(12 ,dtype=torch.floataa ) - 0.5
__SCREAMING_SNAKE_CASE = get_new_path(suffix=""".wav""" )
sf.write(lowerCamelCase ,lowerCamelCase ,1_6000 )
__SCREAMING_SNAKE_CASE = AgentAudio(lowerCamelCase )
self.assertTrue(torch.allclose(lowerCamelCase ,agent_type.to_raw() ,atol=1E-4 ) )
self.assertEqual(agent_type.to_string() ,lowerCamelCase )
@require_vision
@require_torch
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.randint(0 ,256 ,(64, 64, 3) )
__SCREAMING_SNAKE_CASE = AgentImage(lowerCamelCase )
__SCREAMING_SNAKE_CASE = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase ,agent_type._tensor ,atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() ,Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__SCREAMING_SNAKE_CASE = Image.open(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AgentImage(lowerCamelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
__SCREAMING_SNAKE_CASE = Image.open(lowerCamelCase )
__SCREAMING_SNAKE_CASE = AgentImage(lowerCamelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase ) )
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """Hey!"""
__SCREAMING_SNAKE_CASE = AgentText(lowerCamelCase )
self.assertEqual(lowerCamelCase ,agent_type.to_string() )
self.assertEqual(lowerCamelCase ,agent_type.to_raw() )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
| 109 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs ( word : tuple):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
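# Added worked example (comments only) for the two helpers above:
#   bytes_to_unicode() maps every byte 0..255 to a printable unicode character,
#   so byte-level BPE never has to special-case spaces or control bytes.
#   get_pairs(("h", "e", "l", "l", "o"))
#   -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}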
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
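# Added usage sketch (comments only; assumes this class backs transformers'
# LongformerTokenizer). Byte-level BPE marks word-initial spaces with "Ġ":
#
#   tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tok.tokenize("Hello world")    # ['Hello', 'Ġworld']
#   tok.tokenize(" Hello world")   # ['ĠHello', 'Ġworld']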
| 665 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
__a : Dict = logging.get_logger(__name__)
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
    def _parse_labels ( self , labels ):
        '''simple docstring'''
        if isinstance(labels , str ):
            labels = [label.strip() for label in labels.split(''',''' ) if label.strip()]
        return labels
    def __call__( self , sequences , labels , hypothesis_template ):
        '''simple docstring'''
        if len(labels ) == 0 or len(sequences ) == 0:
            raise ValueError('''You must include at least one label and at least one sequence.''' )
        if hypothesis_template.format(labels[0] ) == hypothesis_template:
            raise ValueError(
                (
                    '''The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. '''
                    '''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
                ).format(hypothesis_template ) )
        if isinstance(sequences , str ):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label )] for label in labels] )
        return sequence_pairs, sequences
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__=ZeroShotClassificationArgumentHandler() , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
'''simple docstring'''
__lowercase = args_parser
super().__init__(*_a , **_a )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=TruncationStrategy.ONLY_FIRST , **lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
__lowercase = self.tokenizer.eos_token
try:
__lowercase = self.tokenizer(
_a , add_special_tokens=_a , return_tensors=_a , padding=_a , truncation=_a , )
except Exception as e:
if "too short" in str(_a ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__lowercase = self.tokenizer(
_a , add_special_tokens=_a , return_tensors=_a , padding=_a , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> Any:
'''simple docstring'''
if kwargs.get('''multi_class''' , _a ) is not None:
__lowercase = kwargs["""multi_class"""]
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
__lowercase = {}
if "candidate_labels" in kwargs:
__lowercase = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
__lowercase = kwargs["""hypothesis_template"""]
__lowercase = {}
if "multi_label" in kwargs:
__lowercase = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ , ) -> List[Any]:
'''simple docstring'''
if len(_a ) == 0:
pass
elif len(_a ) == 1 and "candidate_labels" not in kwargs:
__lowercase = args[0]
else:
raise ValueError(F"Unable to understand extra arguments {args}" )
return super().__call__(_a , **_a )
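    # Added usage sketch (comments only; assumes this pipeline is registered as
    # "zero-shot-classification" in transformers):
    #
    #   from transformers import pipeline
    #   clf = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    #   clf("I love this movie", candidate_labels=["positive", "negative"])
    #   # -> {"sequence": ..., "labels": ["positive", "negative"], "scores": [...]}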
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="This example is {}." ) -> str:
'''simple docstring'''
__lowercase = self._args_parser(_a , _a , _a )
for i, (candidate_label, sequence_pair) in enumerate(zip(_a , _a ) ):
__lowercase = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(_a ) - 1,
**model_input,
}
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
__lowercase = inputs["""candidate_label"""]
__lowercase = inputs["""sequence"""]
__lowercase = {k: inputs[k] for k in self.tokenizer.model_input_names}
__lowercase = self.model(**_a )
__lowercase = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> Any:
'''simple docstring'''
__lowercase = [outputs["""candidate_label"""] for outputs in model_outputs]
__lowercase = [outputs["""sequence"""] for outputs in model_outputs]
__lowercase = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
__lowercase = logits.shape[0]
__lowercase = len(_a )
__lowercase = N // n
__lowercase = logits.reshape((num_sequences, n, -1) )
if multi_label or len(_a ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__lowercase = self.entailment_id
__lowercase = -1 if entailment_id == 0 else 0
__lowercase = reshaped_outputs[..., [contradiction_id, entailment_id]]
__lowercase = np.exp(_a ) / np.exp(_a ).sum(-1 , keepdims=_a )
__lowercase = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__lowercase = reshaped_outputs[..., self.entailment_id]
__lowercase = np.exp(_a ) / np.exp(_a ).sum(-1 , keepdims=_a )
__lowercase = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
        }
| 534 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
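# Added usage sketch (comments only; assumes this class is transformers'
# ConvBertTokenizerFast, a WordPiece tokenizer with BERT-style specials):
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("hello world")
#   # enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id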
| 665 | 0 |
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : Union[str, Any] =datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig (datasets.BuilderConfig ):
    """simple docstring"""
    features : Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Stream the rows of one Spark partition at a time, in the requested order.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )

    def _validate_cache_dir(self):
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(self._cache_dir, exist_ok=True)
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")

        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda arg: _rename_shard(*arg)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
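A self-contained sketch (my illustration; the real split_shard_indices_by_worker helper lives elsewhere in datasets and may differ) of how slicing a partition order per worker distributes shards:

# Hypothetical stand-in for split_shard_indices_by_worker: round-robin over the order.
def assign_partitions(num_partitions: int, worker_id: int, num_workers: int) -> list:
    return list(range(num_partitions))[worker_id::num_workers]

assert assign_partitions(4, 0, 2) == [0, 2]  # matches the expectations in the test file further below
assert assign_partitions(4, 1, 2) == [1, 3]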
| 136 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    """Fast BART tokenizer (GPT-2 style byte-level BPE), backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
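A toy sketch (mine, not in the file) of the BART pair layout and the all-zero token type IDs the last two methods produce:

# Hypothetical IDs; real values come from the pretrained vocab.
bos_id, eos_id = 0, 2
ids_a, ids_b = [31, 32], [41]
pair = [bos_id] + ids_a + [eos_id] + [eos_id] + ids_b + [eos_id]  # <s> A </s></s> B </s>
token_type_ids = len(pair) * [0]  # BART does not use segment embeddings
assert len(token_type_ids) == len(pair)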
| 665 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
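A smoke-test sketch (my addition; the shapes are hypothetical) showing how the resnet block above is initialized and applied in Flax:

block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
x = jnp.zeros((1, 8, 8, 32))   # NHWC layout used by flax.linen.Conv
temb = jnp.zeros((1, 128))     # time embedding; any feature size works here
params = block.init(jax.random.PRNGKey(0), x, temb)
out = block.apply(params, x, temb)
assert out.shape == (1, 8, 8, 64)  # the 1x1 shortcut matches the residual channels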
| 186 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
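A hedged example of driving the converter from Python instead of the CLI; every path here is a hypothetical placeholder.

convert_tf_checkpoint_to_pytorch(
    task="WTQ",
    reset_position_index_per_cell=True,
    tf_checkpoint_path="/tmp/tapas_wtq/model.ckpt",   # hypothetical path
    tapas_config_file="/tmp/tapas_wtq/config.json",   # hypothetical path
    pytorch_dump_path="/tmp/tapas_wtq_pt",            # hypothetical path
)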
| 665 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir}
            --output_dir {output_dir}
            --model_name_or_path facebook/rag-sequence-base
            --model_type rag_sequence
            --do_train
            --do_predict
            --n_val -1
            --val_check_interval 1.0
            --train_batch_size 2
            --eval_batch_size 1
            --max_source_length 25
            --max_target_length 25
            --val_max_target_length 25
            --test_max_target_length 25
            --label_smoothing 0.1
            --dropout 0.1
            --attention_dropout 0.1
            --weight_decay 0.001
            --adam_epsilon 1e-08
            --max_grad_norm 0.1
            --lr_scheduler polynomial
            --learning_rate 3e-04
            --num_train_epochs 1
            --warmup_steps 4
            --gradient_accumulation_steps 1
            --distributed-port 8787
            --use_dummy_dataset 1
            --distributed_retriever {distributed_retriever}
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 263 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel spectrogram images and converts them back to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Only works with DDIM, whose reverse process is deterministic.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
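A quick sketch (my addition) of the static slerp helper: spherical interpolation between two noise tensors, recovering the endpoints at alpha 0 and 1.

x0 = torch.randn(1, 1, 8, 8)
x1 = torch.randn(1, 1, 8, 8)
mid = AudioDiffusionPipeline.slerp(x0, x1, 0.5)
assert mid.shape == x0.shape
assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 0.0), x0, atol=1e-5)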
| 665 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
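The _LazyModule indirection above defers the heavy submodule imports until an attribute is first accessed. A minimal sketch of the same idea (my illustration, not the transformers implementation) using the module-level __getattr__ hook from PEP 562:

# hypothetical mypkg/__init__.py: lazy attribute resolution
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}  # hypothetical submodule and symbol

def __getattr__(name):
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")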
| 443 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
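The gradient-accumulation fallback above preserves the effective batch size; a tiny arithmetic check (my illustration, values hypothetical):

requested_batch_size, max_gpu_batch_size = 64, 16
gradient_accumulation_steps = requested_batch_size // max_gpu_batch_size
assert max_gpu_batch_size * gradient_accumulation_steps == requested_batch_size  # 16 * 4 == 64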
| 665 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import functools
def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
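A worked example (my addition): with day passes costing 2, weekly 7 and monthly 15, travel days [1, 4, 6, 7, 8, 20] cost 11 in total: a day pass on day 1, a 7-day pass covering days 4 through 8, and a day pass on day 20.

assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11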
| 665 | 0 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 563 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
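A small worked example (my addition) on a strictly diagonally dominant system; each Jacobi sweep moves the iterate closer to the exact solution of the system.

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
# Every diagonal entry dominates its row, so convergence is guaranteed.
print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5, -0.5], iterations=3))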
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 294 |
'''simple docstring'''
def match_pattern(a: str, b: str) -> bool:
    """Return True if string `a` matches the camelCase pattern `b`: the uppercase
    letters of `a` must match `b` in order, and lowercase letters may be skipped."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
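Two quick checks (my addition) of the matcher: lowercase letters may be skipped, but every uppercase letter must appear in the pattern.

assert match_pattern("FrameBuffer", "FB")       # extra lowercase letters are skippable
assert not match_pattern("ForceFeedBack", "FB")  # a third uppercase letter has no match in the pattern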
| 665 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 651 |
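The script above caps the per-device batch at MAX_GPU_BATCH_SIZE and compensates with gradient accumulation so the effective batch size is unchanged. A minimal self-contained sketch of that arithmetic (the 64 is an illustrative oversized batch, not a value from the script):

batch_size = 64
MAX_GPU_BATCH_SIZE = 16
gradient_accumulation_steps = 1
if batch_size > MAX_GPU_BATCH_SIZE:
    gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
    batch_size = MAX_GPU_BATCH_SIZE
# Four micro-batches of 16 reproduce the effective batch size of 64.
assert batch_size * gradient_accumulation_steps == 64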
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list used for 0-1 BFS."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        # Deque-based 0-1 BFS: 0-weight edges go to the front, 1-weight edges
        # to the back, so vertices are popped in nondecreasing distance order.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 0 |
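A short usage sketch for the 0-1 BFS graph above, using the names from the listing:

g = AdjacencyList(5)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 4, 0)
g.add_edge(2, 4, 0)
# 0 -> 1 -> 2 -> 4 costs 0 + 1 + 0 = 1, as does 0 -> 3 -> 4.
assert g.get_shortest_path(0, 4) == 1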
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b iteratively, starting from init_val."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry does not dominate the rest of its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681 |
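A small worked example for the Jacobi solver above; the matrix is strictly diagonally dominant, so the iteration converges to the exact solution x = (1, -1, -1):

import numpy as np

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [0.0, 1.0, 3.0]])
b = np.array([[2.0], [-6.0], [-4.0]])
x = jacobi_iteration_method(A, b, init_val=[0.0, 0.0, 0.0], iterations=50)
assert np.allclose(x, [1.0, -1.0, -1.0], atol=1e-6)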
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sums the perimeters of the almost equilateral Heronian triangles
    (sides a, a, a +/- 1 with integral area) whose perimeter does not
    exceed max_perimeter (Project Euler problem 94).
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 665 | 0 |
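The recurrence above generates the qualifying perimeters 16, 50, 196, ... (triangles (5, 5, 6), (17, 17, 16), (65, 65, 66), ...), which gives a cheap sanity check at a small bound:

# Only the first three perimeters fit under 200.
assert solution(200) == 16 + 50 + 196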
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters generated by the recurrence up to max_perimeter."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
| 314 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 665 | 0 |
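The CLI above uses the standard argparse sub-command pattern: each registrar adds a parser and sets a func default, and main dispatches on it. A minimal self-contained sketch of that pattern with a hypothetical command:

from argparse import ArgumentParser

def hello_command_parser(subparsers):
    parser = subparsers.add_parser("hello", help="Print a greeting.")
    parser.set_defaults(func=lambda args: print("hello"))

def main():
    parser = ArgumentParser("demo", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    hello_command_parser(subparsers)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    args.func(args)

if __name__ == "__main__":
    main()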
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=2 , lowerCAmelCase__=32 , lowerCAmelCase__=16 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=4 , lowerCAmelCase__=[0, 1, 2, 3] , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=[1, 3_84, 24, 24] , lowerCAmelCase__=True , lowerCAmelCase__=None , ) -> List[Any]:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = backbone_out_indices
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = num_labels
__lowercase = backbone_featmap_shape
__lowercase = scope
__lowercase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowercase = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_a , backbone_featmap_shape=self.backbone_featmap_shape , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = DPTModel(config=_a )
model.to(_a )
model.eval()
__lowercase = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = DPTForDepthEstimation(_a )
model.to(_a )
model.eval()
__lowercase = model(_a )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = DPTForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__lowercase = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase = config_and_inputs
__lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,unittest.TestCase ):
"""simple docstring"""
__a : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
__a : List[Any] = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__a : Any = False
__a : int = False
__a : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = DPTModelTester(self )
__lowercase = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_a )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_a )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
if model_class in get_values(_a ):
continue
__lowercase = model_class(_a )
model.to(_a )
model.train()
__lowercase = self._prepare_for_class(_a , _a , return_labels=_a )
__lowercase = model(**_a ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = False
__lowercase = True
if model_class in get_values(_a ) or not model_class.supports_gradient_checkpointing:
continue
__lowercase = model_class(_a )
model.to(_a )
model.gradient_checkpointing_enable()
model.train()
__lowercase = self._prepare_for_class(_a , _a , return_labels=_a )
__lowercase = model(**_a ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = _config_zero_init(_a )
for model_class in self.all_model_classes:
__lowercase = model_class(config=_a )
# Skip the check for the backbone
__lowercase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__lowercase = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__lowercase = DPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = """add"""
with self.assertRaises(_a ):
__lowercase = DPTForDepthEstimation(_a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
__lowercase = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_a )
__lowercase = prepare_img()
__lowercase = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__lowercase = model(**_a )
__lowercase = outputs.predicted_depth
# verify the predicted depth
__lowercase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , _a )
__lowercase = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(_a )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , _a , atol=1E-4 ) )
 | 534 |
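The integration test above pins a small slice of the model output against hard-coded reference values instead of the full tensor. A minimal illustration of that pattern (both tensors here are made up):

import torch

expected_slice = torch.tensor([[5.6437, 5.6146, 5.6511]])
actual_output = torch.tensor([[5.6437, 5.6146, 5.6512]])  # stand-in for a model output
# atol absorbs small numeric drift across hardware and library versions.
assert torch.allclose(actual_output, expected_slice, atol=1e-4)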
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
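The __init__ file above registers an _import_structure and swaps the module for a _LazyModule, so heavy submodules are imported only on first attribute access. A rough simplified sketch of the idea (not the actual transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # Import the defining submodule only when the attribute is first used.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value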
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :List[Any] = GPTaTokenizer
__lowerCAmelCase :List[str] = GPTaTokenizerFast
__lowerCAmelCase :Optional[Any] = True
__lowerCAmelCase :Dict = {"add_prefix_space": True}
__lowerCAmelCase :Dict = False
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a__ : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
a__ : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
a__ : Tuple = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
a__ : Union[str, Any] = {"""unk_token""": """<unk>"""}
a__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
a__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(_a ) )
def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__( self , **__lowercase ) -> Optional[int]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def SCREAMING_SNAKE_CASE__( self , __lowercase ) -> str:
"""simple docstring"""
a__ : Optional[int] = """lower newer"""
a__ : List[Any] = """lower newer"""
return input_text, output_text
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : Any = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a__ : str = """lower newer"""
a__ : Tuple = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
a__ : str = tokenizer.tokenize(_a , add_prefix_space=_a )
self.assertListEqual(_a , _a )
a__ : Optional[int] = tokens + [tokenizer.unk_token]
a__ : Tuple = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
a__ : List[Any] = self.get_tokenizer()
a__ : int = self.get_rust_tokenizer(add_prefix_space=_a )
a__ : Tuple = """lower newer"""
# Testing tokenization
a__ : str = tokenizer.tokenize(_a , add_prefix_space=_a )
a__ : Optional[int] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids without special tokens
a__ : Dict = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
a__ : Optional[Any] = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
# Testing conversion to ids with special tokens
a__ : Tuple = self.get_rust_tokenizer(add_prefix_space=_a )
a__ : Tuple = tokenizer.encode(_a , add_prefix_space=_a )
a__ : Any = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
# Testing the unknown token
a__ : List[str] = tokens + [rust_tokenizer.unk_token]
a__ : str = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_a ) , _a )
def SCREAMING_SNAKE_CASE__( self , *__lowercase , **__lowercase ) -> Union[str, Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__( self , __lowercase=1_5 ) -> int:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
a__ : Dict = self.rust_tokenizer_class.from_pretrained(_a , **_a )
# Simple input
a__ : Tuple = """This is a simple input"""
a__ : Tuple = ["""This is a simple input 1""", """This is a simple input 2"""]
a__ : List[str] = ("""This is a simple input""", """This is a pair""")
a__ : Any = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="""max_length""" )
# Simple input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="""max_length""" )
# Simple input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="""max_length""" , )
# Pair input
self.assertRaises(_a , tokenizer_r.encode , _a , max_length=_a , padding="""max_length""" )
# Pair input
self.assertRaises(_a , tokenizer_r.encode_plus , _a , max_length=_a , padding="""max_length""" )
# Pair input
self.assertRaises(
_a , tokenizer_r.batch_encode_plus , _a , max_length=_a , padding="""max_length""" , )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
a__ : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
a__ : Dict = """This is a simple input"""
a__ : List[str] = ["""This is a simple input looooooooong""", """This is a simple input"""]
a__ : int = ("""This is a simple input""", """This is a pair""")
a__ : Tuple = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
a__ : Optional[int] = tokenizer.pad_token_id
a__ : Union[str, Any] = tokenizer(_a , padding="""max_length""" , max_length=3_0 , return_tensors="""np""" )
a__ : Union[str, Any] = tokenizer(_a , padding=_a , truncate=_a , return_tensors="""np""" )
a__ : List[Any] = tokenizer(*_a , padding="""max_length""" , max_length=6_0 , return_tensors="""np""" )
a__ : str = tokenizer(_a , padding=_a , truncate=_a , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 3_0 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 6_0 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : Any = """$$$"""
a__ : str = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_a , add_bos_token=_a )
a__ : Optional[Any] = """This is a simple input"""
a__ : Any = ["""This is a simple input 1""", """This is a simple input 2"""]
a__ : Dict = tokenizer.bos_token_id
a__ : int = tokenizer(_a )
a__ : Union[str, Any] = tokenizer(_a )
self.assertEqual(out_s.input_ids[0] , _a )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a__ : Any = tokenizer.decode(out_s.input_ids )
a__ : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _a )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Dict = [self.get_tokenizer(do_lower_case=_a , add_bos_token=_a )]
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a__ : Dict = """Encode this."""
a__ : Optional[int] = """This one too please."""
a__ : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
encoded_sequence += tokenizer.encode(_a , add_special_tokens=_a )
a__ : List[str] = tokenizer.encode_plus(
_a , _a , add_special_tokens=_a , return_special_tokens_mask=_a , )
a__ : str = encoded_sequence_dict["""input_ids"""]
a__ : List[Any] = encoded_sequence_dict["""special_tokens_mask"""]
self.assertEqual(len(_a ) , len(_a ) )
a__ : List[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_a )
]
a__ : int = [x for x in filtered_sequence if x is not None]
self.assertEqual(_a , _a )
@require_tokenizers
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : Dict = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=_a )
a__ : int = """A photo of a cat"""
a__ : Union[str, Any] = tokenizer.encode(
_a , )
self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("""test_opt""" )
a__ : int = AutoTokenizer.from_pretrained("""./test_opt""" )
a__ : Optional[int] = tokenizer.encode(
_a , )
self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : int = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , use_slow=_a )
a__ : List[str] = """A photo of a cat"""
a__ : Union[str, Any] = tokenizer.encode(
_a , )
# Same as above
self.assertEqual(_a , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
@unittest.skip("""This test is failing because of a bug in the fast tokenizer""" )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
a__ : Dict = AutoTokenizer.from_pretrained("""facebook/opt-350m""" , from_slow=_a )
a__ : Optional[int] = """bos"""
a__ : Optional[int] = tokenizer.get_vocab()["""bos"""]
a__ : Optional[Any] = """A photo of a cat"""
a__ : Tuple = tokenizer.encode(
_a , )
# We changed the bos token
self.assertEqual(_a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
tokenizer.save_pretrained("""./tok""" )
a__ : Union[str, Any] = AutoTokenizer.from_pretrained("""./tok""" )
self.assertTrue(tokenizer.is_fast )
a__ : Optional[int] = tokenizer.encode(
_a , )
self.assertEqual(_a , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
| 136 |
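The padding tests above assert two behaviours of tokenizer.pad: 'longest' pads to the longest sequence in the batch, 'max_length' to a fixed size, with attention_mask zeroed over the padding. A pure-Python sketch of those semantics (not the transformers implementation):

def pad_batch(batches, pad_id, padding="longest", max_length=None):
    target = max_length if padding == "max_length" else max(len(b) for b in batches)
    input_ids = [b + [pad_id] * (target - len(b)) for b in batches]
    attention_mask = [[1] * len(b) + [0] * (target - len(b)) for b in batches]
    return {"input_ids": input_ids, "attention_mask": attention_mask}

out = pad_batch([[5, 6, 7], [5, 6]], pad_id=0, padding="max_length", max_length=4)
assert out["input_ids"] == [[5, 6, 7, 0], [5, 6, 0, 0]]
assert out["attention_mask"] == [[1, 1, 1, 0], [1, 1, 0, 0]]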
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 5_1_2,
'''bert-large-uncased''': 5_1_2,
'''bert-base-cased''': 5_1_2,
'''bert-large-cased''': 5_1_2,
'''bert-base-multilingual-uncased''': 5_1_2,
'''bert-base-multilingual-cased''': 5_1_2,
'''bert-base-chinese''': 5_1_2,
'''bert-base-german-cased''': 5_1_2,
'''bert-large-uncased-whole-word-masking''': 5_1_2,
'''bert-large-cased-whole-word-masking''': 5_1_2,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 5_1_2,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 5_1_2,
'''bert-base-cased-finetuned-mrpc''': 5_1_2,
'''bert-base-german-dbmdz-cased''': 5_1_2,
'''bert-base-german-dbmdz-uncased''': 5_1_2,
'''TurkuNLP/bert-base-finnish-cased-v1''': 5_1_2,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 5_1_2,
'''wietsedv/bert-base-dutch-cased''': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    """Fast BERT tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 186 |
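The two sequence-pair helpers above implement the standard BERT layouts [CLS] A [SEP] and [CLS] A [SEP] B [SEP], with token_type_ids of 0 over the first segment and 1 over the second. A small self-contained illustration (101 and 102 are the usual [CLS] and [SEP] ids):

def build_inputs(cls_id, sep_id, ids_a, ids_b=None):
    output = [cls_id] + ids_a + [sep_id]
    if ids_b:
        output += ids_b + [sep_id]
    return output

def segment_ids(cls_id, sep_id, ids_a, ids_b=None):
    first = len([cls_id] + ids_a + [sep_id]) * [0]
    return first if ids_b is None else first + len(ids_b + [sep_id]) * [1]

assert build_inputs(101, 102, [7, 8], [9]) == [101, 7, 8, 102, 9, 102]
assert segment_ids(101, 102, [7, 8], [9]) == [0, 0, 0, 0, 1, 1]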
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
lowercase__ ='CompVis/stable-diffusion-v1-1'
lowercase__ ='CompVis/stable-diffusion-v1-2'
lowercase__ ='CompVis/stable-diffusion-v1-3'
lowercase__ ='CompVis/stable-diffusion-v1-4'
class a_ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ):
super().__init__()
a_ = StableDiffusionPipeline.from_pretrained(_a )
a_ = StableDiffusionPipeline.from_pretrained(_a )
a_ = StableDiffusionPipeline.from_pretrained(_a )
a_ = StableDiffusionPipeline(
vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , requires_safety_checker=_a , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowerCAmelCase__ ( self ):
return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
def lowerCAmelCase__ ( self , UpperCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
a_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def lowerCAmelCase__ ( self ):
self.enable_attention_slicing(_a )
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 5_12 , UpperCAmelCase = 5_12 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 5_12 , UpperCAmelCase = 5_12 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 5_12 , UpperCAmelCase = 5_12 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 5_12 , UpperCAmelCase = 5_12 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
return self.pipea(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
@torch.no_grad()
def lowerCAmelCase__ ( self , UpperCAmelCase , UpperCAmelCase = 5_12 , UpperCAmelCase = 5_12 , UpperCAmelCase = 50 , UpperCAmelCase = 7.5 , UpperCAmelCase = None , UpperCAmelCase = 1 , UpperCAmelCase = 0.0 , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = "pil" , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = 1 , **UpperCAmelCase , ):
a_ = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(_a )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
a_ = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get first result from Stable Diffusion Checkpoint v1.2
a_ = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get first result from Stable Diffusion Checkpoint v1.3
a_ = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get first result from Stable Diffusion Checkpoint v1.4
a_ = self.textaimg_sda_a(
prompt=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , **_a , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 263 |
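A hypothetical usage sketch for the comparison pipeline above; the custom_pipeline id and the output layout (one image per v1.1-v1.4 checkpoint) are assumptions read off the class body, not a tested invocation:

import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="stable_diffusion_comparison",  # assumed community pipeline name
    torch_dtype=torch.float16,
)
pipe.enable_attention_slicing()
output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
images = output.images  # expected: four images, one per checkpoint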
'''simple docstring'''
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: repeatedly emit vertices whose indegree drops to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 665 | 0 |
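Two quick checks of the function above: a cyclic graph trips the cnt != len(graph) branch, while a chain prints its unique order:

topological_sort({0: [1], 1: [0]})          # prints "Cycle exists"
topological_sort({0: [1], 1: [2], 2: []})   # prints [0, 1, 2]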
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
args = parser.parse_args()
dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 443 |
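A hypothetical direct call to consolidate above; the DPR question encoder and BART generator identifiers are illustrative choices, and any compatible pair works:

from pathlib import Path

dest = Path("./rag-sequence-checkpoint")
dest.mkdir(exist_ok=True)
consolidate(
    "rag_sequence",
    "facebook/bart-large-cnn",
    "facebook/dpr-question_encoder-single-nq-base",
    dest,
)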
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    '''simple docstring'''

    def __init__( self ,parent ,batch_size=13 ,seq_length=7 ,is_training=True ,use_input_mask=True ,use_token_type_ids=False ,use_labels=True ,vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=16 ,type_sequence_label_size=2 ,initializer_range=0.02 ,num_labels=3 ,num_choices=4 ,scope=None ,):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return LlamaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)

    def create_and_check_model( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        '''simple docstring'''
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        '''simple docstring'''
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,)
        result = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,)
        result = model(input_ids ,attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_causal_lm( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        '''simple docstring'''
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=input_mask ,labels=token_labels )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_decoder_model_past_large_inputs( self ,config ,input_ids ,token_type_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        '''simple docstring'''
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids ,attention_mask=input_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,use_cache=True ,)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) ,config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] ,dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] ,dim=-1 )
        output_from_no_past = model(
            next_input_ids ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,output_hidden_states=True ,)["""hidden_states"""][0]
        output_from_past = model(
            next_tokens ,attention_mask=next_attention_mask ,encoder_hidden_states=encoder_hidden_states ,encoder_attention_mask=encoder_attention_mask ,past_key_values=past_key_values ,output_hidden_states=True ,)["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice ,output_from_no_past_slice ,atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest( ModelTesterMixin ,GenerationTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": LlamaModel,
            """text-classification""": LlamaForSequenceClassification,
            """text-generation""": LlamaForCausalLM,
            """zero-shot""": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp( self ):
        '''simple docstring'''
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=LlamaConfig ,hidden_size=37 )

    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_various_embeddings( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    def test_llama_sequence_classification_model( self ):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_llama_sequence_classification_model_for_single_label( self ):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    def test_llama_sequence_classification_model_for_multi_label( self ):
        '''simple docstring'''
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )

    @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base( self ):
        '''simple docstring'''
        pass
    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling( self ,scaling_type ):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] ,config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output ,scaled_short_output ,atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output ,scaled_short_output ,atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output ,scaled_long_output ,atol=1e-5 ) )
@require_torch
class LlamaIntegrationTest( unittest.TestCase ):
    '''simple docstring'''

    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
    @slow
    def test_model_7b_logits( self ):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )

    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
    @slow
    def test_model_13b_logits( self ):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )

    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
    @slow
    def test_model_13b_chat_logits( self ):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )

    @unittest.skip(
        """Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test""" )
    @slow
    def test_model_70b_logits( self ):
        '''simple docstring'''
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) ,EXPECTED_MEAN ,atol=1e-2 ,rtol=1e-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] ,EXPECTED_SLICE ,atol=1e-5 ,rtol=1e-5 )

    @unittest.skip("""Model is currently gated""" )
    @slow
    def test_model_13b_greedy_generation( self ):
        '''simple docstring'''
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        prompt = """Simply put, the theory of relativity states that """
        tokenizer = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
        input_ids = tokenizer.encode(prompt ,return_tensors="""pt""" )
        model = LlamaForCausalLM.from_pretrained(
            """meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids ,max_new_tokens=64 ,top_p=None ,temperature=1 ,do_sample=False )
        text = tokenizer.decode(generated_ids[0] ,skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION ,text )
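# A minimal sketch of how these slow/gated integration tests are typically exercised; the
# test file path is an assumption about the repository layout:
#   RUN_SLOW=1 pytest tests/models/llama/test_modeling_llama.py -k "llama"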
| 665 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images , text=None , add_special_tokens=True , padding=False , truncation=None , max_length=None , stride=0 , pad_to_multiple_of=None , return_token_type_ids=None , return_attention_mask=None , return_overflowing_tokens=False , return_special_tokens_mask=False , return_offsets_mapping=False , return_length=False , verbose=True , return_tensors=None , **kwargs , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
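# Minimal usage sketch (the checkpoint name is an illustrative assumption; any ViLT
# checkpoint shipping both an image processor and a BERT tokenizer works the same way):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(images=image, text="How many cats are there?", return_tensors="pt")
#   # encoding holds input_ids/attention_mask from the tokenizer plus pixel_values/pixel_mask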
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CompetitionMathMetric( datasets.Metric ):
    '''simple docstring'''

    def _info( self ):
        '''simple docstring'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" ),
                    """references""": datasets.Value("""string""" ),
                } ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)

    def _compute( self ,predictions ,references ):
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions ,references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i ,j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
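# Quick sketch extending the docstring example to more than one pair (requires the
# hendrycks/math dependency noted in the import above):
#   metric = datasets.load_metric("competition_math")
#   metric.compute(references=["\\frac{1}{2}", "2"], predictions=["1/2", "3"])
#   # -> {"accuracy": 0.5}: "1/2" canonicalizes to "\\frac{1}{2}", while "3" != "2"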
| 665 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
},
"""tokenizer_file""": {
"""google/bigbird-roberta-base""": (
"""https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
),
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/bigbird-roberta-base""": 4096,
"""google/bigbird-roberta-large""": 4096,
"""google/bigbird-base-trivia-itc""": 4096,
}
UpperCAmelCase : Dict = """▁"""
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens = []

    def __init__( self , vocab_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , sep_token="[SEP]" , mask_token="[MASK]" , cls_token="[CLS]" , **kwargs , ) -> None:
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
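# Minimal usage sketch (checkpoint name taken from the pretrained map above):
#   tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   enc = tok("Paris is the capital of France.")
#   # enc["input_ids"] starts with the [CLS] id and ends with the [SEP] id, per
#   # build_inputs_with_special_tokens above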
| 563 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """retribert"""

    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=8 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,share_encoders=True ,projection_dim=128 ,pad_token_id=0 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
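# A minimal sketch: the defaults above describe the yjernite/retribert-base-uncased
# architecture, so a bare instantiation is enough for quick shape experiments:
#   config = RetriBertConfig()
#   assert config.projection_dim == 128 and config.share_encoders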
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""

    @slow
    def test_output_embeds_base_model( self ) -> None:
        model = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
        features = {
            """input_ids""": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32 ),  # "My dog is cute"
            """attention_mask""": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32 ),
        }
        output = model(features )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape, expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_681_762, 0.10_894_451, 0.06_772_504],
                    [-0.06_423_668, 0.02_366_615, 0.04_329_344],
                    [-0.06_057_295, 0.09_974_135, -0.00_070_584],
                ]
            ], dtype=tf.float32, )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4 ) )
| 294 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class BigBirdTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens = []

    def __init__( self ,vocab_file ,unk_token="<unk>" ,bos_token="<s>" ,eos_token="</s>" ,pad_token="<pad>" ,sep_token="[SEP]" ,mask_token="[MASK]" ,cls_token="[CLS]" ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,**kwargs ,):
        '''simple docstring'''
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,sep_token=sep_token ,mask_token=mask_token ,cls_token=cls_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab( self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__( self ,d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize( self ,text: str ):
        '''simple docstring'''
        return self.sp_model.encode(text ,out_type=str )

    def _convert_token_to_id( self ,token: str ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token( self ,index ):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self ,tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode( self ,token_ids: List[int] ,skip_special_tokens: bool = False ,clean_up_tokenization_spaces: bool = None ,spaces_between_special_tokens: bool = True ,**kwargs ,):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""" ,False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids ,skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(sub_texts ) )
        else:
            text = """""".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def save_vocabulary( self ,save_directory: str ,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,"""wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)

    def build_inputs_with_special_tokens( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
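# Round-trip sketch (illustrative; exact round-tripping depends on the sentencepiece model):
#   tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tok("hello world")["input_ids"]
#   tok.decode(ids, skip_special_tokens=True)   # -> "hello world" for plain ASCII text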
| 665 | 0 |
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = """allenai"""
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(r"""$""" , """</w>""" , k ), v) for k, v in d.items() )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'''{k}</w>''']
        da[k] = d[k]  # restore
    return da
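# Quick sanity check of the remapping (a toy vocabulary for illustration only):
#   rewrite_dict_keys({"le@@": 5, "tt@@": 6, "er": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3})
#   -> {"le": 5, "tt": 6, "er</w>": 7, "<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}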
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f'''Writing results to {pytorch_dump_folder_path}''' )
    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path )
    fsmt_folder_path = dirname(fsmt_checkpoint_path )
    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
    data_name_or_path = """."""
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f'''using checkpoint {checkpoint_file}''' )
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path , checkpoint_file , data_name_or_path , archive_map=models , **kwargs )
    args = vars(chkpt["""args"""]["""model"""] )
    src_lang = args["""source_lang"""]
    tgt_lang = args["""target_lang"""]
    data_root = dirname(pytorch_dump_folder_path )
    model_dir = basename(pytorch_dump_folder_path )
    # dicts
    src_dict_file = os.path.join(fsmt_folder_path , f'''dict.{src_lang}.txt''' )
    tgt_dict_file = os.path.join(fsmt_folder_path , f'''dict.{tgt_lang}.txt''' )
    src_dict = Dictionary.load(src_dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , """vocab-src.json""" )
    print(f'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
    with open(src_vocab_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )
    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break
    tgt_dict = Dictionary.load(tgt_dict_file )
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices )
    tgt_vocab_size = len(tgt_vocab )
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path , """vocab-tgt.json""" )
    print(f'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
    with open(tgt_vocab_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(tgt_vocab , ensure_ascii=False , indent=json_indent ) )
    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["""merges_file"""] )
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path , fn )
        if os.path.exists(fsmt_merges_file ):
            break
    with open(fsmt_merges_file , encoding="""utf-8""" ) as fin:
        merges = fin.read()
    merges = re.sub(r""" \d+$""" , """""" , merges , 0 , re.M )  # remove frequency number
    print(f'''Generating {merges_file}''' )
    with open(merges_file , """w""" , encoding="""utf-8""" ) as fout:
        fout.write(merges )
    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path , """config.json""" )
    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f'''need to extend tokenizer to support bpe={args['bpe']}'''
    assert args["tokenizer"] == "moses", f'''need to extend tokenizer to support tokenizer={args['tokenizer']}'''
    model_conf = {
        """architectures""": ["""FSMTForConditionalGeneration"""],
        """model_type""": """fsmt""",
        """activation_dropout""": args["""activation_dropout"""],
        """activation_function""": """relu""",
        """attention_dropout""": args["""attention_dropout"""],
        """d_model""": args["""decoder_embed_dim"""],
        """dropout""": args["""dropout"""],
        """init_std""": 0.02,
        """max_position_embeddings""": args["""max_source_positions"""],
        """num_hidden_layers""": args["""encoder_layers"""],
        """src_vocab_size""": src_vocab_size,
        """tgt_vocab_size""": tgt_vocab_size,
        """langs""": [src_lang, tgt_lang],
        """encoder_attention_heads""": args["""encoder_attention_heads"""],
        """encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
        """encoder_layerdrop""": args["""encoder_layerdrop"""],
        """encoder_layers""": args["""encoder_layers"""],
        """decoder_attention_heads""": args["""decoder_attention_heads"""],
        """decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
        """decoder_layerdrop""": args["""decoder_layerdrop"""],
        """decoder_layers""": args["""decoder_layers"""],
        """bos_token_id""": 0,
        """pad_token_id""": 1,
        """eos_token_id""": 2,
        """is_encoder_decoder""": True,
        """scale_embedding""": not args["""no_scale_embedding"""],
        """tie_word_embeddings""": args["""share_all_embeddings"""],
    }
    # good hparam defaults to start with
    model_conf["""num_beams"""] = 5
    model_conf["""early_stopping"""] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["""length_penalty"""] = best_score_hparams[model_dir]["""length_penalty"""]
    else:
        model_conf["""length_penalty"""] = 1.0
    print(f'''Generating {fsmt_model_config_file}''' )
    with open(fsmt_model_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )
    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        """langs""": [src_lang, tgt_lang],
        """model_max_length""": 1024,
        """do_lower_case""": do_lower_case,
    }
    print(f'''Generating {fsmt_tokenizer_config_file}''' )
    with open(fsmt_tokenizer_config_file , """w""" , encoding="""utf-8""" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )
    # model
    model = chkpt["""models"""][0]
    model_state_dict = model.state_dict()
    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
    # remove unneeded keys
    ignore_keys = [
        """model.model""",
        """model.encoder.version""",
        """model.decoder.version""",
        """model.encoder_embed_tokens.weight""",
        """model.decoder_embed_tokens.weight""",
        """model.encoder.embed_positions._float_tensor""",
        """model.decoder.embed_positions._float_tensor""",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )
    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = FSMTForConditionalGeneration(config )
    # check that it loads ok
    model_new.load_state_dict(model_state_dict , strict=False )
    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f'''Generating {pytorch_weights_dump_path}''' )
    torch.save(model_state_dict , pytorch_weights_dump_path )
    print("""Conversion is done!""" )
    print("""\nLast step is to upload the files to s3""" )
    print(f'''cd {data_root}''' )
    print(f'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
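    # Example invocation (a sketch; the checkpoint path below is hypothetical):
    #   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
    #       --fsmt_checkpoint_path ./wmt19.ru-en.ensemble/model4.pt \
    #       --pytorch_dump_folder_path ./fsmt-wmt19-ru-en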
| 651 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests( unittest.TestCase ):
    '''simple docstring'''
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self ,model ,tokenizer ,processor ):
        '''simple docstring'''
        generator = Text2TextGenerationPipeline(model=model ,tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test( self ,generator ,_ ):
        '''simple docstring'''
        outputs = generator("""Something there""" )
        self.assertEqual(outputs ,[{"""generated_text""": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        outputs = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=True )
        self.assertEqual(
            outputs ,[
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] ,)
        outputs = generator(
            ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=True )
        self.assertEqual(
            outputs ,[
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] ,)
        with self.assertRaises(ValueError ):
            generator(4 )
    @require_torch
    def test_small_model_pt( self ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" ,do_sample=False )
        self.assertEqual(outputs ,[{"""generated_text""": """"""}] )
        num_return_sequences = 3
        outputs = generator(
            """Something there""" ,num_return_sequences=num_return_sequences ,num_beams=num_return_sequences ,)
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs ,target_outputs )
        outputs = generator("""This is a test""" ,do_sample=True ,num_return_sequences=2 ,return_tensors=True )
        self.assertEqual(
            outputs ,[
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] ,)
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""] ,do_sample=True ,num_return_sequences=2 ,batch_size=2 ,return_tensors=True ,)
        self.assertEqual(
            outputs ,[
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] ,)

    @require_tf
    def test_small_model_tf( self ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" ,do_sample=False )
        self.assertEqual(outputs ,[{"""generated_text""": """"""}] )
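# Minimal usage sketch of the pipeline under test (the tiny random checkpoint makes output
# quality meaningless -- this only exercises the plumbing):
#   generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
#   generator("Something there", do_sample=False)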
| 665 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
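# Lazy-import behavior sketch: the torch-dependent modeling module is only imported on
# first attribute access (this mirrors the standard transformers pattern, nothing extra):
#   from transformers.models.focalnet import FocalNetConfig   # cheap
#   config = FocalNetConfig()                                 # triggers the real import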
| 681 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """hidden_size""": """n_embd""",
        """max_position_embeddings""": """n_positions""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self ,vocab_size=50257 ,n_positions=1024 ,n_embd=768 ,n_layer=12 ,n_head=12 ,n_inner=None ,activation_function="gelu_pytorch_tanh" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=50256 ,eos_token_id=50256 ,attention_softmax_in_fp32=True ,scale_attention_softmax_in_fp32=True ,multi_query=True ,**kwargs ,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
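# Minimal sketch: a deliberately tiny config for quick experiments (the values below are
# illustrative assumptions, not the santacoder defaults encoded above):
#   config = GPTBigCodeConfig(vocab_size=1000, n_positions=256, n_embd=128, n_layer=2, n_head=4)
#   assert config.multi_query  # multi-query attention stays on by default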
| 665 | 0 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList , dest_layers: nn.ModuleList , layers_to_copy: List[int] ) ->None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), F'{len(dest_layers )} != {len(layers_to_copy )}'
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student: int , n_teacher: int ) ->List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                F'no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'
                F' {n_student}' )
        return list(range(n_student ) )

def get_layers_to_supervise(n_student: int , n_teacher: int ) ->List[int]:
    if n_student > n_teacher:
        raise ValueError(F'Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}' )
    elif n_teacher == n_student:
        return list(range(n_student ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def lowerCamelCase ( __lowerCamelCase : Union[str, PreTrainedModel] , __lowerCamelCase : Union[str, Path] = "student" , __lowerCamelCase : Union[int, None] = None , __lowerCamelCase : Union[int, None] = None , __lowerCamelCase : Dict=False , __lowerCamelCase : List[str]=None , __lowerCamelCase : Optional[Any]=None , **__lowerCamelCase : str , ) ->Optional[int]:
_SCREAMING_SNAKE_CASE = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCamelCase , __lowerCamelCase ):
AutoTokenizer.from_pretrained(__lowerCamelCase ).save_pretrained(__lowerCamelCase ) # purely for convenience
_SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase ).eval()
else:
assert isinstance(__lowerCamelCase , __lowerCamelCase ), F'teacher must be a model or string got type {type(__lowerCamelCase )}'
_SCREAMING_SNAKE_CASE = teacher.config.to_diff_dict()
try:
_SCREAMING_SNAKE_CASE = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
_SCREAMING_SNAKE_CASE = teacher_e
if d is None:
_SCREAMING_SNAKE_CASE = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d} )
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers""" ):
_SCREAMING_SNAKE_CASE = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
_SCREAMING_SNAKE_CASE = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
_SCREAMING_SNAKE_CASE = teacher_e
if d is None:
_SCREAMING_SNAKE_CASE = teacher_d
if hasattr(teacher.config , """num_encoder_layers""" ):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d} )
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCamelCase )
# Copy weights
_SCREAMING_SNAKE_CASE = teacher.config_class(**__lowerCamelCase )
_SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM.from_config(__lowerCamelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
_SCREAMING_SNAKE_CASE = student.load_state_dict(teacher.state_dict() , strict=__lowerCamelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
_SCREAMING_SNAKE_CASE = list(range(__lowerCamelCase ) ), list(range(__lowerCamelCase ) )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'
F' {save_path}' )
student.save_pretrained(__lowerCamelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
_SCREAMING_SNAKE_CASE = pick_layers_to_copy(__lowerCamelCase , __lowerCamelCase )
if d_layers_to_copy is None:
_SCREAMING_SNAKE_CASE = pick_layers_to_copy(__lowerCamelCase , __lowerCamelCase )
try:
if hasattr(
__lowerCamelCase , """prophetnet""" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCamelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCamelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCamelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCamelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCamelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCamelCase )
logger.info(
F'Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}' )
_SCREAMING_SNAKE_CASE = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
    student.save_pretrained(save_path )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
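# A minimal sketch of a layer-picking helper like the `pick_layers_to_copy`
# used above, assuming the strategy described in the comments: spread the
# copied layers across the teacher while always keeping the first and last
# layer. This is an illustration, not the exact helper this script imports.
def pick_layers_to_copy_sketch(n_student: int, n_teacher: int) -> list:
    if n_student >= n_teacher:
        return list(range(n_teacher))
    if n_student == 1:
        return [0]
    # Evenly spaced indices from 0 to n_teacher - 1, endpoints included.
    step = (n_teacher - 1) / (n_student - 1)
    return [round(i * step) for i in range(n_student)]
# e.g. pick_layers_to_copy_sketch(3, 12) -> [0, 6, 11]: first, middle, last.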
| 314 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs ( word : int):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
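# Quick illustration of `get_pairs`: for the symbol tuple ("l", "o", "w") it
# returns the adjacent pairs {("l", "o"), ("o", "w")}, which the `bpe` method
# below ranks against the merge table.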
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
    def __init__( self : int ,vocab_file : Tuple ,merges_file : Union[str, Any] ,errors : Optional[Any]="replace" ,bos_token : Union[str, Any]="<s>" ,eos_token : Union[str, Any]="</s>" ,sep_token : int="</s>" ,cls_token : List[str]="<s>" ,unk_token : List[Any]="<unk>" ,pad_token : Any="<pad>" ,mask_token : Dict="<mask>" ,add_prefix_space : Optional[int]=False ,**kwargs : List[Any] ,):
        '''simple docstring'''
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            errors=errors ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        with open(vocab_file ,encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file ,encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges ,range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    def vocab_size( self : Any ):
        '''simple docstring'''
        return len(self.encoder )
    def get_vocab( self : str ):
        '''simple docstring'''
        return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self : int ,token : int ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            word = tuple(new_word )
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self : Dict ,text : Optional[int] ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat ,text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self : Union[str, Any] ,token : Optional[int] ):
        '''simple docstring'''
        return self.encoder.get(token ,self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self : int ,index : Dict ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self : Optional[int] ,tokens : List[Any] ):
        '''simple docstring'''
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
        return text
    def save_vocabulary( self : int ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file ,"""w""" ,encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self : List[str] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : int ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ,already_has_special_tokens : bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self : Any ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self : str ,text : Optional[int] ,is_split_into_words : Union[str, Any]=False ,**kwargs : Dict ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
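# Standalone sketch of the byte-level BPE merge loop that the `bpe` method
# above implements (illustrative only: this function name and the tiny rank
# table in the example are made up, not part of this tokenizer's API).
def bpe_sketch(word, bpe_ranks):
    symbols = tuple(word)
    while len(symbols) > 1:
        # Rank every adjacent pair; the best-ranked pair is merged first.
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        best = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if best not in bpe_ranks:
            break
        first, second = best
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return " ".join(symbols)
# e.g. bpe_sketch("hello", {("h", "e"): 0, ("l", "l"): 1}) returns "he ll o".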
| 665 | 0 |
def solution ( max_base = 10 , max_power = 22 ):
    """simple docstring"""
    powers = range(1 , max_power )
    bases = range(1 , max_base )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
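# Worked check (assuming this is Project Euler problem 63): 8**3 = 512 is a
# 3-digit third power, so it counts; with the default bounds the total is 49.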
if __name__ == "__main__":
print(F'''{solution(1_0, 2_2) = }''') | 534 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
    def __init__( self : str ,vocab_file : Dict=None ,tokenizer_file : List[Any]=None ,do_lower_case : Dict=True ,unk_token : List[str]="[UNK]" ,sep_token : Any="[SEP]" ,pad_token : str="[PAD]" ,cls_token : List[Any]="[CLS]" ,mask_token : List[str]="[MASK]" ,tokenize_chinese_chars : Union[str, Any]=True ,strip_accents : Any=None ,**kwargs : Optional[int] ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : List[Any] ,token_ids_0 : List[Any] ,token_ids_1 : Any=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Dict ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self : int ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
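    # Illustration of the two sequence-building methods above for a BERT-style
    # pair (hedged sketch with made-up ids): with token_ids_0=[7, 8] and
    # token_ids_1=[9],
    #   build_inputs_with_special_tokens      -> [CLS, 7, 8, SEP, 9, SEP]
    #   create_token_type_ids_from_sequences  -> [0, 0, 0, 0, 1, 1]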
| 665 | 0 |
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowercase : List[Any] =logging.get_logger(__name__)
class snake_case__ (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
    def __init__( self , *args , **kwargs ) -> List[str]:
        """simple docstring"""
        warnings.warn(
            """The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use GLPNImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 136 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
    def __init__( self : str ,vocab_file : Any=None ,merges_file : Optional[int]=None ,tokenizer_file : int=None ,errors : Optional[int]="replace" ,bos_token : Dict="<s>" ,eos_token : Optional[Any]="</s>" ,sep_token : Dict="</s>" ,cls_token : Tuple="<s>" ,unk_token : Optional[Any]="<unk>" ,pad_token : List[str]="<pad>" ,mask_token : int="<mask>" ,add_prefix_space : str=False ,trim_offsets : List[str]=True ,**kwargs : Dict ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,errors=errors ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,trim_offsets=trim_offsets ,**kwargs ,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer ,tokenizer_component ,None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" ,add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" ,trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors ,state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer ,tokenizer_component ,new_value )
    @property
    def mask_token( self : List[str] ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self : Union[str, Any] ,value : Any ):
        '''simple docstring'''
        value = AddedToken(value ,lstrip=True ,rstrip=False ) if isinstance(value ,str ) else value
        self._mask_token = value
    def _batch_encode_plus( self : str ,*args : str ,**kwargs : Optional[int] ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args ,**kwargs )
    def _encode_plus( self : str ,*args : List[Any] ,**kwargs : str ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args ,**kwargs )
    def save_vocabulary( self : Optional[int] ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self : str ,token_ids_0 : Optional[int] ,token_ids_1 : int=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self : Optional[int] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
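    # Usage note (hedged): a byte-level BPE like BART's distinguishes " world"
    # from "world", which is why `add_prefix_space=True` is enforced above for
    # pre-tokenized (is_split_into_words) inputs.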
| 665 | 0 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask( size : Dict ,overlap_pixels : List[str] ,remove_borders : Union[str, Any]=[] ):
    """simple docstring"""
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) ,dtype=np.uint8 ) * 255
    mask = np.pad(mask ,mode='linear_ramp' ,pad_width=overlap_pixels ,end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp( n : Union[str, Any] ,smallest : List[str] ,largest : List[Any] ):
    """simple docstring"""
    return max(smallest ,min(n ,largest ) )
def clamp_rect( rect : [int] ,min : [int] ,max : [int] ):
    """simple docstring"""
    return (
        clamp(rect[0] ,min[0] ,max[0] ),
        clamp(rect[1] ,min[1] ,max[1] ),
        clamp(rect[2] ,min[0] ,max[0] ),
        clamp(rect[3] ,min[1] ,max[1] ),
    )
def add_overlap_rect( rect : [int] ,overlap : int ,image_size : [int] ):
    """simple docstring"""
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect ,[0, 0] ,[image_size[0], image_size[1]] )
    return rect
def squeeze_tile( tile : Dict ,original_image : List[Any] ,original_slice : List[Any] ,slice_x : List[Any] ):
    """simple docstring"""
    result = Image.new('RGB' ,(tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) ,Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) ,(0, 0) ,)
    result.paste(tile ,(original_slice, 0) )
    return result
def unsqueeze_tile( tile : Optional[int] ,original_image_slice : Tuple ):
    """simple docstring"""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def UpperCAmelCase__( n : Any ,d : Optional[Any] ):
    """simple docstring"""
    divisor = n % d
    return n - divisor
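# Worked example for the helper above: with n=45 and d=32 the remainder is 13,
# so it floors 45 to the nearest lower multiple of 32, returning 32.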
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCAmelCase_ : AutoencoderKL , lowerCAmelCase_ : CLIPTextModel , lowerCAmelCase_ : CLIPTokenizer , lowerCAmelCase_ : UNetaDConditionModel , lowerCAmelCase_ : DDPMScheduler , lowerCAmelCase_ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase_ : int = 350 , ) -> Optional[Any]:
super().__init__(
vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , low_res_scheduler=_a , scheduler=_a , max_noise_level=_a , )
def lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
torch.manual_seed(0 )
__A= (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__A= add_overlap_rect(_a , _a , image.size )
__A= image.crop(_a )
__A= ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__A= translated_slice_x - (original_image_slice / 2)
__A= max(0 , _a )
__A= squeeze_tile(_a , _a , _a , _a )
__A= to_input.size
__A= to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__A= super(_a , self ).__call__(image=_a , **_a ).images[0]
__A= upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__A= unsqueeze_tile(_a , _a )
__A= upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__A= []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__A= Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=_a ) , mode='L' , )
final_image.paste(
_a , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , _a )
@torch.no_grad()
def __call__( self : Optional[int] , lowerCAmelCase_ : Union[str, List[str]] , lowerCAmelCase_ : Union[PIL.Image.Image, List[PIL.Image.Image]] , lowerCAmelCase_ : int = 75 , lowerCAmelCase_ : float = 9.0 , lowerCAmelCase_ : int = 50 , lowerCAmelCase_ : Optional[Union[str, List[str]]] = None , lowerCAmelCase_ : Optional[int] = 1 , lowerCAmelCase_ : float = 0.0 , lowerCAmelCase_ : Optional[torch.Generator] = None , lowerCAmelCase_ : Optional[torch.FloatTensor] = None , lowerCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : int = 128 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 32 , ) -> Tuple:
__A= Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__A= math.ceil(image.size[0] / tile_size )
__A= math.ceil(image.size[1] / tile_size )
__A= tcx * tcy
__A= 0
for y in range(_a ):
for x in range(_a ):
self._process_tile(
_a , _a , _a , _a , _a , _a , _a , prompt=_a , num_inference_steps=_a , guidance_scale=_a , noise_level=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def main( ):
    """simple docstring"""
    model_id = """stabilityai/stable-diffusion-x4-upscaler"""
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id ,revision='fp16' ,torch_dtype=torch.float16 )
    pipe = pipe.to('cuda' )
    image = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
    def callback(obj : List[str] ):
        print(f"""progress: {obj["progress"]:.4f}""" )
        obj["image"].save('diffusers_library_progress.jpg' )
    final_image = pipe(image=image ,prompt='Black font, white background, vector' ,noise_level=40 ,callback=callback )
    final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
| 186 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
    help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
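# Illustrative invocation (the script name and file paths here are hypothetical):
#   python convert_tapas_checkpoint.py --task WTQ --reset_position_index_per_cell \
#     --tf_checkpoint_path ./model.ckpt --tapas_config_file ./config.json \
#     --pytorch_dump_path ./tapas-wtq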
| 665 | 0 |
'''simple docstring'''
def interpolation_search ( sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid division by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion ( sorted_collection , item , left , right ):
    # avoid division by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        # window bounds assumed to mirror the iterative version above
        return interpolation_search_by_recursion(sorted_collection , item , point , left )
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection , item , right , point )
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection , item , left , point - 1 )
        else:
            return interpolation_search_by_recursion(
                sorted_collection , item , point + 1 , right )
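# Worked probe (matching the demo data below): searching for 67 in
# [10, 30, 40, 45, 50, 66, 77, 93] first probes
# point = 0 + (67 - 10) * 7 // (93 - 10) = 4; since collection[4] = 50 < 67,
# the window narrows to the right of index 4.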
def __assert_sorted ( collection ):
    if collection != sorted(collection ):
        raise ValueError("""Collection must be ascending sorted""" )
    return True
if __name__ == "__main__":
import sys
lowercase__ =0
if debug == 1:
lowercase__ =[10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('Sequence must be ascending sorted to apply interpolation search')
lowercase__ =67
lowercase__ =interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print('Not found')
| 263 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp( xa : torch.Tensor ,xb : torch.Tensor ,alpha : float ):
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(xa ) ,torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
| 665 | 0 |
from __future__ import annotations
def generate_all_permutations ( sequence : list[int | str] ) -> None:
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree ( sequence : list[int | str] , current_sequence : list[int | str] , index : int , index_used : list[int] , ) -> None:
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
_lowercase = [3, 1, 2, 4]
generate_all_permutations(sequence)
_lowercase = ["""A""", """B""", """C"""]
generate_all_permutations(sequence_a)
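# Complexity note: the state-space tree enumerates all n! orderings, so
# [3, 1, 2, 4] prints 4! = 24 permutations and ["A", "B", "C"] prints 3! = 6.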
| 443 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want the padded length to be a round multiple of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
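    # Worked example of the accumulation arithmetic above: batch_size=64 with
    # MAX_GPU_BATCH_SIZE=16 gives gradient_accumulation_steps=4 and a per-step
    # batch of 16, preserving the effective batch size of 64.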
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation, otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
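            # Scale the loss so that gradients accumulated over several
            # micro-batches average out to the full-batch gradient before stepping.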
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 0 |
from random import shuffle
import tensorflow as tf
from numpy import array
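# Note: the code below uses the TensorFlow 1.x graph/session API (tf.Session,
# tf.placeholder, tf.initialize_all_variables); under TF 2.x it would need the
# tf.compat.v1 shims with eager execution disabled.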
def snake_case__ ( lowercase , lowercase ):
lowerCAmelCase_: Tuple = int(lowercase )
assert noofclusters < len(lowercase )
# Find out the dimensionality
lowerCAmelCase_: Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowerCAmelCase_: Dict = list(range(len(lowercase ) ) )
shuffle(lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowerCAmelCase_: Optional[Any] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowerCAmelCase_: Union[str, Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowerCAmelCase_: str = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowerCAmelCase_: Dict = tf.placeholder("float64" , [dim] )
lowerCAmelCase_: Tuple = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowercase , lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowerCAmelCase_: Tuple = [tf.Variable(0 ) for i in range(len(lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowerCAmelCase_: Optional[Any] = tf.placeholder("int32" )
lowerCAmelCase_: Any = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowercase , lowercase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowerCAmelCase_: Dict = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowerCAmelCase_: Optional[Any] = tf.reduce_mean(lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowerCAmelCase_: Optional[Any] = tf.placeholder("float" , [dim] )
lowerCAmelCase_: int = tf.placeholder("float" , [dim] )
lowerCAmelCase_: int = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowercase , lowercase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowerCAmelCase_: List[str] = tf.placeholder("float" , [noofclusters] )
lowerCAmelCase_: int = tf.argmin(lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowerCAmelCase_: int = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowerCAmelCase_: Dict = 100
for _ in range(lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowercase ) ):
lowerCAmelCase_: List[Any] = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                # 'centroid_distances', since that is the input to the
                # cluster assignment node.
lowerCAmelCase_: int = [
sess.run(lowercase , feed_dict={va: vect, va: sess.run(lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowerCAmelCase_: Optional[int] = sess.run(
lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowercase ):
# Collect all the vectors assigned to this cluster
lowerCAmelCase_: List[str] = [
vectors[i]
for i in range(len(lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowerCAmelCase_: str = sess.run(
lowercase , feed_dict={mean_input: array(lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowerCAmelCase_: List[str] = sess.run(lowercase )
lowerCAmelCase_: Optional[int] = sess.run(lowercase )
return centroids, assignments | 613 |
'''simple docstring'''
import functools
def lowerCamelCase ( days : list[int] , costs : list[int]):
    # Validation
    if not isinstance(days , list) or not all(isinstance(day , int) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")
    if len(costs) != 3 or not all(isinstance(cost , int) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("""All days elements should be greater than 0""")
    if max(days) >= 366:
        raise ValueError("""All days elements should be less than 366""")
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
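# Worked example (the classic "minimum cost for tickets" instance): with
# days=[1, 4, 6, 7, 8, 20] and costs=[2, 7, 15], buying a 1-day pass on day 1,
# a 7-day pass covering days 4-8, and a 1-day pass on day 20 costs
# 2 + 7 + 2 = 11, the minimum.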
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE):
_lowercase : Dict = """vivit"""
    def __init__( self , image_size=2_2_4 , num_frames=3_2 , tubelet_size=[2, 1_6, 1_6] , num_channels=3 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu_fast" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-06 , qkv_bias=True , **kwargs , ) -> List[Any]:
        '''simple docstring'''
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs )
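# Hedged usage sketch: like other PretrainedConfig subclasses, this config is
# keyword-driven, e.g. VivitConfig(image_size=224, num_frames=32,
# tubelet_size=[2, 16, 16]); the class name here is assumed from the
# """vivit""" model_type and the model URLs above.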
| 563 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method ( coefficient_matrix : NDArray[float64] , constant_matrix : NDArray[float64] , init_val : list[int] , iterations : int , ):
    rowsa , colsa = coefficient_matrix.shape
    rowsb , colsb = constant_matrix.shape
    if rowsa != colsa:
        msg = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
        raise ValueError(msg)
    if colsb != 1:
        msg = F'Constant matrix must be nx1 but received {rowsb}x{colsb}'
        raise ValueError(msg)
    if rowsa != rowsb:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'received {rowsa}x{colsa} and {rowsb}x{colsb}'
        )
        raise ValueError(msg)
    if len(init_val) != rowsa:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'matrix but received {len(init_val)} and {rowsa}'
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")
    table : NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1)
    rows , cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant ( table : NDArray[float64]):
    rows , cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows):
        total = 0
        for j in range(0 , cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
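# Worked example: for the strictly diagonally dominant system
#   4x + y = 2
#   x + 3y = -6
# starting from [0, 0], the Jacobi iterates converge toward
# x = 12/11 (about 1.0909) and y = -26/11 (about -2.3636).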
| 665 | 0 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
def train_command_factory ( args: Namespace ) -> List[Any]:
    return TrainCommand(args )
class snake_case ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
@staticmethod
    def register_subcommand ( parser ) -> Any:
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.', )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.', )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.' )
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate', type=float, default=3E-5, help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon', type=float, default=1E-08, help='Epsilon for Adam optimizer.' )
        train_parser.set_defaults(func=train_command_factory )
def __init__( self, _lowercase ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = logging.get_logger('transformers-cli/training' )
SCREAMING_SNAKE_CASE_ = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output, exist_ok=_a )
SCREAMING_SNAKE_CASE_ = args.output
SCREAMING_SNAKE_CASE_ = args.column_label
SCREAMING_SNAKE_CASE_ = args.column_text
SCREAMING_SNAKE_CASE_ = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE_ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
SCREAMING_SNAKE_CASE_ = Processor.create_from_csv(
args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
SCREAMING_SNAKE_CASE_ = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
SCREAMING_SNAKE_CASE_ = Processor.create_from_csv(
args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
SCREAMING_SNAKE_CASE_ = args.validation_split
SCREAMING_SNAKE_CASE_ = args.train_batch_size
SCREAMING_SNAKE_CASE_ = args.valid_batch_size
SCREAMING_SNAKE_CASE_ = args.learning_rate
SCREAMING_SNAKE_CASE_ = args.adam_epsilon
def a__ ( self ) -> Optional[int]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def a__ ( self ) -> str:
raise NotImplementedError
def a__ ( self ) -> Optional[Any]:
self.pipeline.fit(
self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 294 |
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be abbreviated to `b` by capitalizing some of
    its lowercase letters and deleting the remaining lowercase ones."""
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i chars of `a` be turned into the first j chars of `b`?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    # Match a[i] (capitalizing it if lowercase) against b[j].
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    # Lowercase letters may simply be deleted.
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
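    # Worked example — a hedged sketch; the strings below are assumptions added
    # for illustration. "daBcd" abbreviates to "ABC": drop the lowercase 'd',
    # capitalize 'a', match 'B', capitalize 'c', drop the trailing 'd'.
    print(abbr("daBcd", "ABC"))  # True
    print(abbr("dBcd", "ABC"))   # False: there is no 'a' to capitalize into 'A'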
| 665 | 0 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source: float, target: float) -> bool:
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_3_5_1_5_6_3,
"""num_examples""": 1_0_0_0_0,
},
{
"""name""": """validation""",
"""num_bytes""": 2_3_8_4_1_8,
"""num_examples""": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 651 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS shortest paths."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                # 0-weight edges go to the front of the deque, 1-weight edges
                # to the back, which keeps the deque sorted by distance.
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
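    # Minimal usage sketch — this 5-vertex graph is an assumption added for
    # illustration, not part of the original file.
    g = AdjacencyList(5)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 1)
    g.add_edge(2, 4, 0)
    g.add_edge(0, 3, 1)
    g.add_edge(3, 4, 1)
    print(g.get_shortest_path(0, 4))  # 1, via 0 -(0)-> 1 -(1)-> 2 -(0)-> 4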
| 665 | 0 |
class MaxFenwickTree:
    """Fenwick (binary indexed) tree answering range-maximum queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The node covers a single element.
                self.tree[index] = value
            else:
                # Recompute the node from the new value and the rest of its range
                # (reconstructed from the query method; this keeps decreasing
                # updates correct as well).
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
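    # Minimal usage sketch — indices and values are assumptions added for
    # illustration, not part of the original file.
    ftree = MaxFenwickTree(5)
    ftree.update(0, 4)
    ftree.update(3, 7)
    print(ftree.query(0, 4))  # 7: the maximum over arr[0:4]
    ftree.update(3, 2)
    print(ftree.query(0, 4))  # 4: updates may also decrease stored values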
| 681 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of almost-equilateral triangles (sides a, a, a±1)
    with integral area and perimeter <= max_perimeter (Project Euler 94)."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a+1) and (a, a, a-1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
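    # Cross-check sketch — an assumption added for illustration, not part of
    # the original solution: brute-force small limits via Heron's formula.
    def _brute_force(max_perimeter: int) -> int:
        total = 0
        for side in range(2, max_perimeter // 3 + 2):
            for third in (side - 1, side + 1):
                perimeter = 2 * side + third
                if perimeter > max_perimeter:
                    continue
                s = perimeter / 2
                area = (s * (s - side) * (s - side) * (s - third)) ** 0.5
                if area > 0 and area == int(area):
                    total += perimeter
        return total

    print(_brute_force(1_000), solution(1_000))  # both print 984 (16+50+196+722)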
| 665 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Insert at the head in descending order so the list ends up ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(
    sll_one: SortedLinkedList, sll_two: SortedLinkedList
) -> SortedLinkedList:
    """Merge two sorted linked lists into a new sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 314 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 665 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__a : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> list[list]:
    """Normalise the input to a list of videos, each a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
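# Behaviour sketch (hedged; the frame below is an assumption for illustration).
# `make_batched` accepts a single frame, a single video, or a batch of videos:
#
#     frame = PIL.Image.new("RGB", (8, 8))
#     make_batched(frame)               # -> [[frame]]           a single frame
#     make_batched([frame, frame])      # -> [[frame, frame]]    a single video
#     make_batched([[frame], [frame]])  # -> [[frame], [frame]]  a batch of videos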
class _UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__a : Optional[Any] = ['''pixel_values''']
def __init__( self , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**_a )
__lowercase = size if size is not None else {"""shortest_edge""": 2_56}
__lowercase = get_size_dict(_a , default_to_square=_a )
__lowercase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
__lowercase = get_size_dict(_a , param_name='''crop_size''' )
__lowercase = do_resize
__lowercase = size
__lowercase = do_center_crop
__lowercase = crop_size
__lowercase = resample
__lowercase = do_rescale
__lowercase = rescale_factor
__lowercase = offset
__lowercase = do_normalize
__lowercase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__lowercase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = PILImageResampling.BILINEAR , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
'''simple docstring'''
__lowercase = get_size_dict(_a , default_to_square=_a )
if "shortest_edge" in size:
__lowercase = get_resize_output_image_size(_a , size['''shortest_edge'''] , default_to_square=_a )
elif "height" in size and "width" in size:
__lowercase = (size["""height"""], size["""width"""])
else:
raise ValueError(F"Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}" )
return resize(_a , size=_a , resample=_a , data_format=_a , **_a )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> int:
'''simple docstring'''
__lowercase = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have \'height\' and \'width\' as keys. Got {size.keys()}" )
return center_crop(_a , size=(size['''height'''], size['''width''']) , data_format=_a , **_a )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Tuple:
'''simple docstring'''
__lowercase = image.astype(np.floataa )
if offset:
__lowercase = image - (scale / 2)
return rescale(_a , scale=_a , data_format=_a , **_a )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Dict:
'''simple docstring'''
return normalize(_a , mean=_a , std=_a , data_format=_a , **_a )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , ) -> Optional[Any]:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
__lowercase = to_numpy_array(_a )
if do_resize:
__lowercase = self.resize(image=_a , size=_a , resample=_a )
if do_center_crop:
__lowercase = self.center_crop(_a , size=_a )
if do_rescale:
__lowercase = self.rescale(image=_a , scale=_a , offset=_a )
if do_normalize:
__lowercase = self.normalize(image=_a , mean=_a , std=_a )
__lowercase = to_channel_dimension_format(_a , _a )
return image
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = ChannelDimension.FIRST , **lowerCAmelCase__ , ) -> Dict:
'''simple docstring'''
__lowercase = do_resize if do_resize is not None else self.do_resize
__lowercase = resample if resample is not None else self.resample
__lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase = do_rescale if do_rescale is not None else self.do_rescale
__lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase = offset if offset is not None else self.offset
__lowercase = do_normalize if do_normalize is not None else self.do_normalize
__lowercase = image_mean if image_mean is not None else self.image_mean
__lowercase = image_std if image_std is not None else self.image_std
__lowercase = size if size is not None else self.size
__lowercase = get_size_dict(_a , default_to_square=_a )
__lowercase = crop_size if crop_size is not None else self.crop_size
__lowercase = get_size_dict(_a , param_name='''crop_size''' )
if not valid_images(_a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
__lowercase = make_batched(_a )
__lowercase = [
[
self._preprocess_image(
image=_a , do_resize=_a , size=_a , resample=_a , do_center_crop=_a , crop_size=_a , do_rescale=_a , rescale_factor=_a , offset=_a , do_normalize=_a , image_mean=_a , image_std=_a , data_format=_a , )
for img in video
]
for video in videos
]
__lowercase = {"""pixel_values""": videos}
return BatchFeature(data=_a , tensor_type=_a ) | 534 |
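# Pipeline note (descriptive sketch, not executable against the mangled names
# above): for each frame the processor optionally resizes to the configured
# shortest edge, center-crops, rescales to a unit range (optionally shifted by
# an offset so values are centred around zero), and normalizes with the
# ImageNet mean/std; the stacked frames are returned under "pixel_values".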
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
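# Behaviour note (illustrative, describing the `_LazyModule` pattern): the
# submodules above are only imported on first attribute access, so e.g.
#
#     from transformers.models.altclip import AltCLIPModel
#
# triggers the import of `modeling_altclip` (and hence torch) at that point,
# keeping `import transformers` cheap and tolerant of missing optional
# dependencies.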
| 665 | 0 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        # Disable the remote-code confirmation timeout so tests don't block on input.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
a__ : Tuple = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__( self ) -> Dict:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Union[str, Any] = Path(_a ) / """preprocessor_config.json"""
a__ : Union[str, Any] = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
a__ : int = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[Any]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Dict = Path(_a ) / """preprocessor_config.json"""
a__ : int = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
a__ : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : Any = CLIPConfig()
# Create a dummy config file with image_proceesor_type
a__ : str = Path(_a ) / """preprocessor_config.json"""
a__ : Dict = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
a__ : List[Any] = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop("""image_processor_type""" )
a__ : Dict = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
a__ : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
a__ : Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[Any] = Path(_a ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
a__ : Tuple = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
_a , """clip-base is not a local folder and is not a valid model identifier""" ):
a__ : str = AutoImageProcessor.from_pretrained("""clip-base""" )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaisesRegex(
_a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(_a , revision="""aaaaaa""" )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
a__ : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def SCREAMING_SNAKE_CASE__( self ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(_a ):
a__ : str = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
a__ : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
a__ : Union[str, Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = Path(_a ) / """preprocessor_config.json"""
a__ : int = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
a__ : str = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
a__ : Any = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
class snake_case__ (__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase :int = True
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
a__ : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
a__ : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
a__ : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 136 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError('The input value of \'num_rows\' should be \'int\'')
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            'The input value of \'num_rows\' should be greater than or equal to 0')
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    """Benchmark the two triangle generators against each other."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"""{func.__name__}({value})"""
        timing = timeit(f"""__main__.{call}""", setup='import __main__')
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"""{call:38} -- {timing:.4f} seconds""")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
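    # Worked example — a small sketch added for illustration:
    print_pascal_triangle(5)
    #     1
    #    1 1
    #   1 2 1
    #  1 3 3 1
    # 1 4 6 4 1
    assert generate_pascal_triangle(4) == generate_pascal_triangle_optimized(4)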
| 186 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
'''simple docstring'''
lowercase__ =[
(10_00, 'M'),
(9_00, 'CM'),
(5_00, 'D'),
(4_00, 'CD'),
(1_00, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral to an integer, handling subtractive pairs."""
    vals = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert an integer to a Roman numeral using the ROMAN table above."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
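    # Worked examples — a small sketch added for illustration:
    assert roman_to_int("MCMXCIV") == 1994  # M + CM + XC + IV
    assert int_to_roman(1994) == "MCMXCIV"
    assert roman_to_int(int_to_roman(3549)) == 3549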
| 263 |
'''simple docstring'''
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: print a topological order, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
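# For the graph above, Kahn's algorithm finds no cycle and prints the order
# [0, 1, 2, 3, 4, 5].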
| 665 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
"""IBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""IBertForMaskedLM""",
"""IBertForMultipleChoice""",
"""IBertForQuestionAnswering""",
"""IBertForSequenceClassification""",
"""IBertForTokenClassification""",
"""IBertModel""",
"""IBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 443 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
    @unittest.skip("""Model is currently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_owlvit""": [
"""OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""OwlViTConfig""",
"""OwlViTOnnxConfig""",
"""OwlViTTextConfig""",
"""OwlViTVisionConfig""",
],
"""processing_owlvit""": ["""OwlViTProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
"""OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""OwlViTModel""",
"""OwlViTPreTrainedModel""",
"""OwlViTTextModel""",
"""OwlViTVisionModel""",
"""OwlViTForObjectDetection""",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 613 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
    def _compute( self ,predictions ,references ):
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions ,references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i ,j ) else 0.0
        accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
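# Hedged usage sketch (editor addition): exercising the metric above through
# the `datasets` API (requires the math_equivalence dependency), mirroring the
# docstring example.
if __name__ == "__main__":
    metric = datasets.load_metric("competition_math")
    print(metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]))  # {'accuracy': 1.0}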
| 665 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """LayoutLMv3ImageProcessor"""
    tokenizer_class = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["""words"""]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        '''simple docstring'''
        # in case of an overflow, map each `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
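# Hedged usage sketch (editor addition): driving the processor above through
# its public wrapper; the checkpoint and image path are illustrative, and the
# OCR path requires pytesseract to be installed.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv3Processor

    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")
    print(encoding.keys())  # input_ids, attention_mask, bbox, pixel_values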
| 563 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """retribert"""
    def __init__( self ,vocab_size=30522 ,hidden_size=768 ,num_hidden_layers=8 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1e-12 ,share_encoders=True ,projection_dim=128 ,pad_token_id=0 ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
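# Hedged usage sketch (editor addition): instantiating the config with one
# overridden field via the transformers public API (assumed to expose the
# same class as defined above).
if __name__ == "__main__":
    from transformers import RetriBertConfig

    config = RetriBertConfig(projection_dim=256)
    print(config.hidden_size, config.projection_dim)  # 768 256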
| 665 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
PATTERNS = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
["memory_attention", "encoder_attn"],
["attention", "attn"],
["/", "."],
[".LayerNorm.gamma", "_layer_norm.weight"],
[".LayerNorm.beta", "_layer_norm.bias"],
["r.layer_", "r.layers."],
["output_proj", "out_proj"],
["ffn.dense_1.", "fc2."],
["ffn.dense.", "fc1."],
["ffn_layer_norm", "final_layer_norm"],
["kernel", "weight"],
["encoder_layer_norm.", "encoder.layer_norm."],
["decoder_layer_norm.", "decoder.layer_norm."],
["embeddings.weights", "shared.weight"],
]
def rename_state_dict_key( k: str ) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name ,hf_name )
    return k
def convert_pegasus( tf_weights: dict ,cfg_updates: dict ) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates )
    cfg = PegasusConfig(**cfg_kwargs )
    torch_model = PegasusForConditionalGeneration(cfg )
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k )
        if new_k not in sd:
            raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v ,dtype=sd[new_k].dtype )
        assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping['shared.weight'][cfg.pad_token_id + 1] )
    mapping["encoder.embed_tokens.weight"] = mapping["""shared.weight"""]
    mapping["decoder.embed_tokens.weight"] = mapping["""shared.weight"""]
    empty_biases = {k: torch.zeros_like(v ) for k, v in sd.items() if k.endswith('bias' ) and k not in mapping}
    mapping.update(**empty_biases )
    missing, extra = torch_model.model.load_state_dict(mapping ,strict=False )
    unexpected_missing = [
        k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""]
    ]
    assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], F"""no matches found for the following tf keys {extra}"""
    return torch_model
def get_tf_weights_as_numpy( path: str = "./ckpt/aeslc/model.ckpt-32000" ) -> dict:
    init_vars = tf.train.list_variables(path )
    tf_weights = {}
    ignore_name = ["""Adafactor""", """global_step"""]
    for name, shape in tqdm(init_vars ,desc='converting tf checkpoint to dict' ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(path ,name )
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch( ckpt_path: str ,save_dir: str ) -> None:
    # save tokenizer first
    dataset = Path(ckpt_path ).parent.name
    desired_max_model_length = task_specific_params[F"""summarization_{dataset}"""]["""max_position_embeddings"""]
    tok = PegasusTokenizer.from_pretrained('sshleifer/pegasus' ,model_max_length=desired_max_model_length )
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir )
    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    cfg_updates = task_specific_params[F"""summarization_{dataset}"""]
    if dataset == "large":
        cfg_updates["""task_specific_params"""] = task_specific_params
    torch_model = convert_pegasus(tf_weights ,cfg_updates )
    torch_model.save_pretrained(save_dir )
    sd = torch_model.state_dict()
    sd.pop('model.decoder.embed_positions.weight' )
    sd.pop('model.encoder.embed_positions.weight' )
    torch.save(sd ,Path(save_dir ) / 'pytorch_model.bin' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
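    # Hedged usage note (editor addition): a typical invocation of the script
    # above; the script filename and checkpoint path are illustrative.
    #   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 ./pegasus/aeslc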
| 294 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens: List[int] = []
    def __init__( self ,vocab_file ,unk_token="<unk>" ,bos_token="<s>" ,eos_token="</s>" ,pad_token="<pad>" ,sep_token="[SEP]" ,mask_token="[MASK]" ,cls_token="[CLS]" ,sp_model_kwargs: Optional[Dict[str, Any]] = None ,**kwargs ,):
        '''simple docstring'''
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,pad_token=pad_token ,sep_token=sep_token ,mask_token=mask_token ,cls_token=cls_token ,sp_model_kwargs=self.sp_model_kwargs ,**kwargs ,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
@property
    def vocab_size( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
    def get_vocab( self : Optional[Any] ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state
    def __setstate__( self : List[Any] ,d : Any ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self ,"""sp_model_kwargs""" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self : Union[str, Any] ,text : str ):
        '''simple docstring'''
        return self.sp_model.encode(text ,out_type=str )
    def _convert_token_to_id( self : Optional[int] ,token : str ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self : int ,index : Optional[int] ):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self : Dict ,tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = """"""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def _decode( self : int ,token_ids: List[int] ,skip_special_tokens: bool = False ,clean_up_tokenization_spaces: bool = None ,spaces_between_special_tokens: bool = True ,**kwargs ,):
        '''simple docstring'''
        self._decode_use_source_tokenizer = kwargs.pop("""use_source_tokenizer""" ,False )
        filtered_tokens = self.convert_ids_to_tokens(token_ids ,skip_special_tokens=skip_special_tokens )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
                    current_sub_text = []
                sub_texts.append(token )
            else:
                current_sub_text.append(token )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(sub_texts ) )
        else:
            text = """""".join(sub_texts )
        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text )
            return clean_text
        else:
            return text
    def save_vocabulary( self : int ,save_directory: str ,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file ,out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file ,"""wb""" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self : Optional[Any] ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : Optional[int] ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self : Tuple ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
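# Hedged usage sketch (editor addition): round-tripping a sentence with the
# published checkpoint for the tokenizer above.
if __name__ == "__main__":
    from transformers import BigBirdTokenizer

    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    ids = tokenizer("Paris is the capital of France.")["input_ids"]
    print(tokenizer.decode(ids, skip_special_tokens=True))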
| 665 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self : Dict ) -> None:
"""simple docstring"""
super().setUp()
        vocab_tokens = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer( self : Optional[int] , **kwargs ) -> Any:
        """simple docstring"""
        return FunnelTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self : Dict , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self : Dict , tokenizer : str ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = """UNwant\u00E9d,running"""
__lowerCAmelCase : Optional[Any] = """unwanted, running"""
return input_text, output_text
    def test_full_tokenizer( self : int ) -> None:
        """simple docstring"""
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
        self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
    def test_token_type_ids( self : Union[str, Any] ) -> None:
        """simple docstring"""
        tokenizers = self.get_tokenizers(do_lower_case=False )
        for tokenizer in tokenizers:
            inputs = tokenizer("""UNwant\u00E9d,running""" )
            sentence_len = len(inputs["""input_ids"""] ) - 1
            self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
            inputs = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
            self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
| 651 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline( self : List[str] ,model ,tokenizer ,processor ):
        '''simple docstring'''
        generator = TextaTextGenerationPipeline(model=model ,tokenizer=tokenizer )
        return generator, ["Something to write", "Something else"]
    def run_pipeline_test( self : str ,generator ,_ ):
        '''simple docstring'''
        outputs = generator("""Something there""" )
        self.assertEqual(outputs ,[{"""generated_text""": ANY(str )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        outputs = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=True )
        self.assertEqual(
            outputs ,[
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] ,)
        outputs = generator(
            ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=True )
        self.assertEqual(
            outputs ,[
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
                [{"""generated_text""": ANY(str )}, {"""generated_text""": ANY(str )}],
            ] ,)
        with self.assertRaises(ValueError ):
            generator(4 )
@require_torch
    def test_small_model_pt( self : Union[str, Any] ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" ,do_sample=False )
        self.assertEqual(outputs ,[{"""generated_text""": """"""}] )
        num_return_sequences = 3
        outputs = generator(
            """Something there""" ,num_return_sequences=num_return_sequences ,num_beams=num_return_sequences ,)
        target_outputs = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(outputs ,target_outputs )
        outputs = generator("""This is a test""" ,do_sample=True ,num_return_sequences=2 ,return_tensors=True )
        self.assertEqual(
            outputs ,[
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ] ,)
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = """<pad>"""
        outputs = generator(
            ["""This is a test""", """This is a second test"""] ,do_sample=True ,num_return_sequences=2 ,batch_size=2 ,return_tensors=True ,)
        self.assertEqual(
            outputs ,[
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ] ,)
@require_tf
    def test_small_model_tf( self : List[Any] ):
        '''simple docstring'''
        generator = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
        # do_sample=False necessary for reproducibility
        outputs = generator("""Something there""" ,do_sample=False )
        self.assertEqual(outputs ,[{"""generated_text""": """"""}] )
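# Hedged usage sketch (editor addition): the user-facing API exercised by the
# tests above (tiny test checkpoint, deterministic decoding).
if __name__ == "__main__":
    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    print(generator("Something there", do_sample=False))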
| 665 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
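# Hedged usage note (editor addition): as with other lazy inits, attributes
# resolve on first import; the checkpoint name below is an assumption:
#   from transformers import AltCLIPModel, AltCLIPProcessor
#   model = AltCLIPModel.from_pretrained("BAAI/AltCLIP")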
| 681 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """gpt_bigcode"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self ,vocab_size=50257 ,n_positions=1024 ,n_embd=768 ,n_layer=12 ,n_head=12 ,n_inner=None ,activation_function="gelu_pytorch_tanh" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,bos_token_id=50256 ,eos_token_id=50256 ,attention_softmax_in_fp32=True ,scale_attention_softmax_in_fp32=True ,multi_query=True ,**kwargs ,):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
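# Hedged usage sketch (editor addition): building a small config for quick
# tests via the public transformers API (same class as defined above).
if __name__ == "__main__":
    from transformers import GPTBigCodeConfig

    config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4)
    print(config.hidden_size, config.num_hidden_layers)  # 256 4, via attribute_map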
| 665 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class a_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
    def __init__( self , vocab_size=5_0257 , n_positions=1024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 314 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __lowerCAmelCase ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self ,vocab_file ,merges_file ,errors="replace" ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,add_prefix_space=False ,**kwargs ,):
        '''simple docstring'''
        bos_token = AddedToken(bos_token ,lstrip=False ,rstrip=False ) if isinstance(bos_token ,str ) else bos_token
        eos_token = AddedToken(eos_token ,lstrip=False ,rstrip=False ) if isinstance(eos_token ,str ) else eos_token
        sep_token = AddedToken(sep_token ,lstrip=False ,rstrip=False ) if isinstance(sep_token ,str ) else sep_token
        cls_token = AddedToken(cls_token ,lstrip=False ,rstrip=False ) if isinstance(cls_token ,str ) else cls_token
        unk_token = AddedToken(unk_token ,lstrip=False ,rstrip=False ) if isinstance(unk_token ,str ) else unk_token
        pad_token = AddedToken(pad_token ,lstrip=False ,rstrip=False ) if isinstance(pad_token ,str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token ,lstrip=True ,rstrip=False ) if isinstance(mask_token ,str ) else mask_token
        super().__init__(
            errors=errors ,bos_token=bos_token ,eos_token=eos_token ,unk_token=unk_token ,sep_token=sep_token ,cls_token=cls_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,**kwargs ,)
        with open(vocab_file ,encoding="""utf-8""" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file ,encoding="""utf-8""" ) as merges_handle:
            bpe_merges = merges_handle.read().split("""\n""" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges ,range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
    def vocab_size( self : Any ):
'''simple docstring'''
return len(self.encoder )
    def get_vocab( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
    def bpe( self : int ,token ):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs ,key=lambda pair : self.bpe_ranks.get(pair ,float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first ,i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = """ """.join(word )
        self.cache[token] = word
        return word
    def _tokenize( self : Dict ,text ):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat ,text ):
            token = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(""" """ ) )
        return bpe_tokens
    def _convert_token_to_id( self : Union[str, Any] ,token ):
        '''simple docstring'''
        return self.encoder.get(token ,self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self : int ,index ):
        '''simple docstring'''
        return self.decoder.get(index )
    def convert_tokens_to_string( self : Optional[int] ,tokens ):
        '''simple docstring'''
        text = """""".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
        return text
    def save_vocabulary( self : int ,save_directory: str ,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        merge_file = os.path.join(
            save_directory ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(vocab_file ,"""w""" ,encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=True ,ensure_ascii=False ) + """\n""" )
        index = 0
        with open(merge_file ,"""w""" ,encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    index = token_index
                writer.write(""" """.join(bpe_tokens ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self : List[str] ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : int ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ,already_has_special_tokens: bool = False ):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 ,token_ids_1=token_ids_1 ,already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
    def create_token_type_ids_from_sequences( self : Any ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self : str ,text ,is_split_into_words=False ,**kwargs ):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = """ """ + text
        return (text, kwargs)
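# Hedged usage sketch (editor addition): byte-level BPE round trip with the
# published checkpoint for the tokenizer above.
if __name__ == "__main__":
    from transformers import LongformerTokenizer

    tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    ids = tokenizer("Hello world")["input_ids"]
    print(tokenizer.decode(ids))  # "<s>Hello world</s>"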
| 665 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : List[str] = logging.get_logger(__name__)
__a : Union[str, Any] = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class _UpperCamelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''markuplm'''
    def __init__( self , vocab_size=3_05_22 , hidden_size=7_68 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=30_72 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=2_56 , max_xpath_subs_unit_embeddings=10_24 , tag_pad_id=2_16 , subs_pad_id=10_01 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
 | 534 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self : List[Any] ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self : Dict ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self : int ,save_directory: str ,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
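# Hedged usage sketch (editor addition): tokenizing with the published
# checkpoint for the fast tokenizer above.
if __name__ == "__main__":
    from transformers import ConvBertTokenizerFast

    tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
    print(tokenizer("unwanted, running")["input_ids"])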
| 665 | 0 |
def lowerCAmelCase_ ( input_string: str) -> str:
    """simple docstring"""
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
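    # Hedged usage sketch (editor addition): Manacher's algorithm above finds
    # the longest palindromic substring in linear time.
    print(lowerCAmelCase_("forgeeksskeegfor"))  # geeksskeeg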
| 136 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    slow_tokenizer_class = BartTokenizer
    def __init__( self ,vocab_file=None ,merges_file=None ,tokenizer_file=None ,errors="replace" ,bos_token="<s>" ,eos_token="</s>" ,sep_token="</s>" ,cls_token="<s>" ,unk_token="<unk>" ,pad_token="<pad>" ,mask_token="<mask>" ,add_prefix_space=False ,trim_offsets=True ,**kwargs ,):
        '''simple docstring'''
        super().__init__(
            vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,errors=errors ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,trim_offsets=trim_offsets ,**kwargs ,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" ,add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop("""type""" ) )
            pre_tok_state["""add_prefix_space"""] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = """post_processor"""
        tokenizer_component_instance = getattr(self.backend_tokenizer ,tokenizer_component ,None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["""sep"""] = tuple(state["""sep"""] )
            if "cls" in state:
                state["""cls"""] = tuple(state["""cls"""] )
            changes_to_apply = False
            if state.get("""add_prefix_space""" ,add_prefix_space ) != add_prefix_space:
                state["""add_prefix_space"""] = add_prefix_space
                changes_to_apply = True
            if state.get("""trim_offsets""" ,trim_offsets ) != trim_offsets:
                state["""trim_offsets"""] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors ,state.pop("""type""" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer ,tokenizer_component ,new_value )
    @property
    def mask_token( self ):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("""Using mask_token, but it is not set yet.""" )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def mask_token( self ,value ):
        '''simple docstring'''
        value = AddedToken(value ,lstrip=True ,rstrip=False ) if isinstance(value ,str ) else value
        self._mask_token = value
    def _batch_encode_plus( self : str ,*args ,**kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._batch_encode_plus(*args ,**kwargs )
    def _encode_plus( self : str ,*args ,**kwargs ):
        '''simple docstring'''
        is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                """to use it with pretokenized inputs.""" )
        return super()._encode_plus(*args ,**kwargs )
    def save_vocabulary( self : Optional[int] ,save_directory: str ,filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self : str ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self : Optional[int] ,token_ids_0: List[int] ,token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
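# Hedged usage sketch (editor addition): tokenizing with the published
# checkpoint for the fast tokenizer above.
if __name__ == "__main__":
    from transformers import BartTokenizerFast

    tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
    print(tokenizer("Hello world")["input_ids"])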
| 665 | 0 |
'''simple docstring'''
def encrypt( input_string: str ,key: int ) -> str:
    """simple docstring"""
    temp_grid = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num ,lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["""""".join(row ) for row in temp_grid]
    output_string = """""".join(grid )
    return output_string
def decrypt( input_string: str ,key: int ) -> str:
    """simple docstring"""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1:
        return input_string
    temp_grid = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num ,lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('*' )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = """"""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num ,lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce( input_string: str ) -> dict:
    """simple docstring"""
    results = {}
    for key_guess in range(1 ,len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string ,key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
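    # Hedged usage sketch (editor addition): rail-fence encrypt/decrypt round trip.
    ciphertext = encrypt("Hello World", 4)
    print(ciphertext)
    print(decrypt(ciphertext, 4))  # Hello World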
| 186 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task ,reset_position_index_per_cell ,tf_checkpoint_path ,tapas_config_file ,pytorch_dump_path ):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file )
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config )
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.66_4694
        config.cell_selection_preference = 0.20_7951
        config.huber_loss_delta = 0.12_1194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.035_2513
        model = TapasForQuestionAnswering(config=config )
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.90_3421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.76_3141
        model = TapasForQuestionAnswering(config=config )
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config )
    elif task == "MLM":
        model = TapasForMaskedLM(config=config )
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config )
    else:
        raise ValueError(F'Task {task} not supported.' )
    print(F'Building PyTorch model from configuration: {config}' )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model ,config ,tf_checkpoint_path )
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}' )
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" ,model_max_length=512 )
    tokenizer.save_pretrained(pytorch_dump_path )
    print("""Used relative position embeddings:""" ,model.config.reset_position_index_per_cell )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
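
# A sketch of a typical invocation (the script filename and paths below are
# illustrative placeholders, not taken from this excerpt):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path ./tapas-wtq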
| 665 | 0 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm for the greatest common divisor."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive Euclidean algorithm for the greatest common divisor."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
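
# As a quick sanity check, both variants agree with math.gcd from the
# standard library:
import math

for a, b in [(3, 5), (12, 18), (270, 192)]:
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)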
| 263 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: generates mel spectrogram images and converts them to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 for DDPM."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        """Generate a mel spectrogram image (optionally seeded with input audio) and convert it to audio."""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
@torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising loop to recover the noise an image corresponds to (DDIM only)."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
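

# A minimal sketch of using the static slerp helper above to blend two noise
# latents (the shapes here are illustrative):
#
#   x0, x1 = torch.randn(1, 1, 64, 64), torch.randn(1, 1, 64, 64)
#   halfway = AudioDiffusionPipeline.slerp(x0, x1, 0.5)  # spherical midpoint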
| 665 | 0 |
def topological_sort(graph):
    """Kahn's algorithm: prints a topological order, or reports a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
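
# Hand-traced expected output for the sample graph: vertex 0 is the only
# zero-indegree vertex; it releases 1 and 2, which together release 3, which
# releases 4 and 5. The call above therefore prints:
#   [0, 1, 2, 3, 4, 5]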
| 443 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
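
# A sketch of how such a script is usually launched (the filename below is
# illustrative; it mirrors the accelerate examples referenced in the header
# comment):
#   single CPU/GPU:            python nlp_example.py --mixed_precision fp16
#   distributed (after running `accelerate config`):
#                              accelerate launch nlp_example.py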
| 665 | 0 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
INIT_COMMON = [
    # tf -> hf
    ("/", "."),
    ("layer_", "layers."),
    ("kernel", "weight"),
    ("beta", "bias"),
    ("gamma", "weight"),
    ("pegasus", "model"),
]
END_COMMON = [
    (".output.dense", ".fc2"),
    ("intermediate.LayerNorm", "final_layer_norm"),
    ("intermediate.dense", "fc1"),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.out_proj"),
        ("attention.self", "self_attn"),
        ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
        ("attention.encdec_output.dense", "encoder_attn.out_proj"),
        ("attention.encdec", "encoder_attn"),
        ("key", "k_proj"),
        ("value", "v_proj"),
        ("query", "q_proj"),
        ("decoder.LayerNorm", "decoder.layernorm_embedding"),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ("embeddings.word_embeddings", "shared.weight"),
        ("embeddings.position_embeddings", "embed_positions.weight"),
        ("attention.self.LayerNorm", "self_attn_layer_norm"),
        ("attention.output.dense", "self_attn.output"),
        ("attention.self", "self_attn.self"),
        ("encoder.LayerNorm", "encoder.layernorm_embedding"),
    ]
    + END_COMMON
)

KEYS_TO_IGNORE = [
    "encdec/key/bias",
    "encdec/query/bias",
    "encdec/value/bias",
    "self/key/bias",
    "self/query/bias",
    "self/value/bias",
    "encdec_output/dense/bias",
    "attention/output/dense/bias",
]
def rename_state_dict_key(k, patterns):
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k


def convert_bigbird_pegasus(tf_weights: dict, config_update: dict) -> BigBirdPegasusForConditionalGeneration:
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"

    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path) -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_bigbird_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str, config_update: dict):
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    torch_model = convert_bigbird_pegasus(tf_weights, config_update)
    torch_model.save_pretrained(save_dir)
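

# For reference, a sketch of invoking the converter (the script name and
# paths below are placeholders):
#   python convert_bigbird_pegasus_tf_checkpoint_to_pytorch.py \
#       --tf_ckpt_path /path/to/bigbird-pegasus/tf_ckpt \
#       --save_dir ./bigbird-pegasus-large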
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
    args = parser.parse_args()
    config_update = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update) | 613 |
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to travel on every day in `days`, given 1-, 7- and 30-day pass prices."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
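
# A worked instance (the classic example for this problem): with
# days = [1, 4, 6, 7, 8, 20] and costs = [2, 7, 15], a 1-day pass on day 1,
# a 7-day pass covering days 4-10, and a 1-day pass on day 20 total 11.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11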
| 665 | 0 |
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjancency = defaultdict(list)
    for node1, node2, cost in edges:
        adjancency[node1].append([node2, cost])
        adjancency[node2].append([node1, cost])

    result = mst(adjancency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
| 563 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    """Solve Ax = b by Jacobi iteration, starting from `init_val`."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise ValueError unless each diagonal entry dominates its row's off-diagonal sum."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
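
# A small usage sketch with a strictly diagonally dominant 3x3 system
# (values chosen for illustration only):
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))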
| 665 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : List[Any] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
SCREAMING_SNAKE_CASE : Tuple = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
SCREAMING_SNAKE_CASE : Union[str, Any] = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        """Split a string into BPE tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token to an id using the vocab."""
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
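

# A quick illustration of the module-level get_pairs helper used by the BPE
# loop above:
#   word = ("h", "e", "l", "l", "o</w>")
#   get_pairs(word)  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}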
| 294 |
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can become `b` by upper-casing some lowercase letters
    and deleting every remaining lowercase letter."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
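
# Two hand-checked cases for the matcher above: "daBcd" can become "ABC" by
# capitalizing 'a' and 'c' and deleting the remaining lowercase letters,
# while "dBcd" cannot ('d' can never supply the required 'A').
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False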
| 665 | 0 |
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
def __init__( self : Any , lowerCAmelCase : Tuple , lowerCAmelCase : str=1_00 , lowerCAmelCase : Optional[Any]=13 , lowerCAmelCase : Optional[int]=30 , lowerCAmelCase : List[str]=2 , lowerCAmelCase : Any=3 , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : str=True , lowerCAmelCase : Any=32 , lowerCAmelCase : int=4 , lowerCAmelCase : Optional[Any]=4 , lowerCAmelCase : Any=37 , lowerCAmelCase : str="gelu" , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : List[Any]=10 , lowerCAmelCase : Any=0.02 , lowerCAmelCase : Dict=3 , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : int=[0, 1, 2, 3] , ) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Union[str, Any] = 1_00
__lowerCAmelCase : Dict = batch_size
__lowerCAmelCase : Optional[int] = image_size
__lowerCAmelCase : Tuple = patch_size
__lowerCAmelCase : Any = num_channels
__lowerCAmelCase : Tuple = is_training
__lowerCAmelCase : Any = use_labels
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Optional[Any] = intermediate_size
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : str = attention_probs_dropout_prob
__lowerCAmelCase : Any = type_sequence_label_size
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Any = scope
__lowerCAmelCase : Dict = out_indices
__lowerCAmelCase : Optional[int] = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCAmelCase : str = (image_size // patch_size) ** 2
__lowerCAmelCase : Tuple = num_patches + 1
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : str = None
__lowerCAmelCase : Optional[int] = None
if self.use_labels:
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : Dict , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = BeitModel(config=_a )
model.to(_a )
model.eval()
__lowerCAmelCase : Tuple = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase : Tuple , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = BeitForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
__lowerCAmelCase : Any = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple ) -> str:
"""simple docstring"""
__lowerCAmelCase : int = self.type_sequence_label_size
__lowerCAmelCase : str = BeitForImageClassification(_a )
model.to(_a )
model.eval()
__lowerCAmelCase : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : int = BeitForImageClassification(_a )
model.to(_a )
model.eval()
__lowerCAmelCase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCAmelCase : str = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : List[str] = BeitForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__lowerCAmelCase : int = model(_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
__lowerCAmelCase : int = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Any = self.prepare_config_and_inputs()
__lowerCAmelCase : str = config_and_inputs
__lowerCAmelCase : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = BeitModelTester(self )
__lowerCAmelCase : List[Any] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : List[Any] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Dict = model_class(_a )
__lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : str = [*signature.parameters.keys()]
__lowerCAmelCase : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _a )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
def SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
"""simple docstring"""
if not self.model_tester.is_training:
return
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Union[str, Any] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(_a ), BeitForMaskedImageModeling]:
continue
__lowerCAmelCase : Tuple = model_class(_a )
model.to(_a )
model.train()
__lowerCAmelCase : Any = self._prepare_for_class(_a , _a , return_labels=_a )
__lowerCAmelCase : int = model(**_a ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Dict = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(_a ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
__lowerCAmelCase : Optional[int] = model_class(_a )
model.gradient_checkpointing_enable()
model.to(_a )
model.train()
__lowerCAmelCase : str = self._prepare_for_class(_a , _a , return_labels=_a )
__lowerCAmelCase : Any = model(**_a ).loss
loss.backward()
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Tuple = _config_zero_init(_a )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = model_class(config=_a )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Any = BeitModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
"""simple docstring"""
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : int = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(_a )
__lowerCAmelCase : int = self.default_image_processor
__lowerCAmelCase : int = prepare_img()
__lowerCAmelCase : Any = image_processor(images=_a , return_tensors="""pt""" ).pixel_values.to(_a )
# prepare bool_masked_pos
__lowerCAmelCase : Optional[Any] = torch.ones((1, 1_96) , dtype=torch.bool ).to(_a )
# forward pass
with torch.no_grad():
__lowerCAmelCase : List[str] = model(pixel_values=_a , bool_masked_pos=_a )
__lowerCAmelCase : Dict = outputs.logits
# verify the logits
__lowerCAmelCase : Dict = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , _a )
__lowerCAmelCase : List[Any] = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(_a )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , _a , atol=1e-2 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(_a )
__lowerCAmelCase : Tuple = self.default_image_processor
__lowerCAmelCase : Optional[int] = prepare_img()
__lowerCAmelCase : Optional[int] = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = model(**_a )
__lowerCAmelCase : Tuple = outputs.logits
# verify the logits
__lowerCAmelCase : Any = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , _a )
__lowerCAmelCase : Tuple = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1e-4 ) )
__lowerCAmelCase : Optional[int] = 2_81
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
_a )
__lowerCAmelCase : str = self.default_image_processor
__lowerCAmelCase : List[str] = prepare_img()
__lowerCAmelCase : Any = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
__lowerCAmelCase : Dict = model(**_a )
__lowerCAmelCase : List[str] = outputs.logits
# verify the logits
__lowerCAmelCase : Union[str, Any] = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , _a )
__lowerCAmelCase : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(_a )
self.assertTrue(torch.allclose(logits[0, :3] , _a , atol=1e-4 ) )
__lowerCAmelCase : int = 23_96
self.assertEqual(logits.argmax(-1 ).item() , _a )
@slow
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : List[str] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
__lowerCAmelCase : str = model.to(_a )
__lowerCAmelCase : List[Any] = BeitImageProcessor(do_resize=_a , size=6_40 , do_center_crop=_a )
__lowerCAmelCase : List[str] = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
__lowerCAmelCase : str = Image.open(ds[0]["""file"""] )
__lowerCAmelCase : str = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
__lowerCAmelCase : str = model(**_a )
__lowerCAmelCase : List[str] = outputs.logits
# verify the logits
__lowerCAmelCase : int = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , _a )
__lowerCAmelCase : Tuple = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_a:
__lowerCAmelCase : Optional[int] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=_a , )
else:
__lowerCAmelCase : Optional[Any] = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
__lowerCAmelCase : List[str] = model.to(_a )
__lowerCAmelCase : Union[str, Any] = BeitImageProcessor(do_resize=_a , size=6_40 , do_center_crop=_a )
__lowerCAmelCase : Any = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
__lowerCAmelCase : Union[str, Any] = Image.open(ds[0]["""file"""] )
__lowerCAmelCase : Dict = image_processor(images=_a , return_tensors="""pt""" ).to(_a )
# forward pass
with torch.no_grad():
__lowerCAmelCase : int = model(**_a )
__lowerCAmelCase : Optional[int] = outputs.logits.detach().cpu()
__lowerCAmelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(5_00, 3_00)] )
__lowerCAmelCase : Tuple = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , _a )
__lowerCAmelCase : Dict = image_processor.post_process_semantic_segmentation(outputs=_a )
__lowerCAmelCase : Tuple = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , _a )
| 651 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 0 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
            pass
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
model.push_to_hub(F'''caidas/{model_name}''' )
processor.push_to_hub(F'''caidas/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
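
# Typical invocation (illustrative; assumes the script is saved as
# convert_swin2sr_original_to_pytorch.py):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub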
| 681 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum of the perimeters of all "almost equilateral" triangles
    (sides a, a, a +/- 1) with integer sides and integer area, for perimeters
    up to ``max_perimeter`` (Project Euler problem 94).
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
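

# A brute-force cross-check of the recurrence for small limits (illustrative;
# only `solution` above is part of the original script). A triangle
# (a, a, a +/- 1) has integer area exactly when 16*A^2, computed via Heron's
# formula, is a perfect square whose root is divisible by 4.
def _brute_force_solution(max_perimeter: int) -> int:
    import math

    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            perimeter = 2 * a + c
            if perimeter > max_perimeter:
                continue
            # 16*A^2 = p * (p - 2a)^2 * (p - 2c) with p the perimeter
            sixteen_area_sq = perimeter * (perimeter - 2 * a) ** 2 * (perimeter - 2 * c)
            root = math.isqrt(sixteen_area_sq)
            if root > 0 and root * root == sixteen_area_sq and root % 4 == 0:
                total += perimeter
    return total


if __name__ == "__main__":
    # perimeters up to 1000 are 16, 50, 196 and 722 -> sum 984
    assert _brute_force_solution(1_000) == solution(1_000)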
| 665 | 0 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self) -> None:
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("""bits""", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # watermarking libraries cannot encode images smaller than 256x256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, """dwtDct""") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
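

if __name__ == "__main__":
    # Usage sketch (assumes the `invisible-watermark` package providing
    # WatermarkEncoder is installed; shapes and values are illustrative).
    watermarker = StableDiffusionXLWatermarker()
    sample = torch.rand(2, 3, 512, 512) * 2 - 1  # [-1, 1]-normalized BCHW batch
    watermarked = watermarker.apply_watermark(sample)
    print(watermarked.shape)  # torch.Size([2, 3, 512, 512])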
| 314 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func"""):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)


if __name__ == "__main__":
    main()
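
# Typical invocations once installed (these map to the subcommands registered
# above; the training script name is illustrative):
#
#   accelerate config                    # interactive configuration wizard
#   accelerate env                       # print environment info for bug reports
#   accelerate launch my_training_script.py
#   accelerate test                      # sanity-check the saved configuration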
| 665 | 0 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    """
    Utility class holding a conversation and its history; used as the input and
    output type of the conversational pipeline below.
    """

    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\"."
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input"
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    """
    Multi-turn conversational pipeline operating on `Conversation` objects.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation: Conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("""ConversationalPipeline, expects Conversation as inputs""")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                """Add user inputs with the conversation's `add_user_input` method"""
            )
        if hasattr(self.tokenizer, """_build_conversation_input_ids"""):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("""max_length""", self.model.config.max_length)

        n = model_inputs["""input_ids"""].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["""input_ids"""] = model_inputs["""input_ids"""][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["""attention_mask"""] = model_inputs["""attention_mask"""][:, -trim:]
        conversation = model_inputs.pop("""conversation""")
        generate_kwargs["""max_length"""] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["""output_ids"""]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )
        conversation = model_outputs["""conversation"""]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation: Conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
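

# A minimal usage sketch (the model name is illustrative; any conversational
# checkpoint works):
#
#   from transformers import pipeline
#
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("What's the best way to learn Python?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])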
| 534 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
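
# With the `sys.modules[__name__]` swap above, submodules are only imported on
# first attribute access. A minimal standalone sketch of the same pattern
# (names and the stand-in target module are illustrative):
#
#   import importlib, sys, types
#
#   class _Lazy(types.ModuleType):
#       def __getattr__(self, name):
#           heavy = importlib.import_module("json")  # stand-in for a heavy module
#           return getattr(heavy, name)
#
#   sys.modules[__name__] = _Lazy(__name__)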
| 665 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system matrix @ x = vector by Gaussian elimination with
    partial pivoting, returning x as a column matrix.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    rowa: int
    col: int
    cola: int
    pivot_row: int
    ratio: float

    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[rowa][col]), rowa) for rowa in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, size):
            ratio = augmented[rowa][col] / augmented[row][col]
            augmented[rowa][col] = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for cola in range(col, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Return the polynomial of degree len(y_list) - 1 passing through the points
    (1, y_list[0]), (2, y_list[1]), ..., evaluated with rounded coefficients.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating polynomial from Project Euler problem 101."""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum of the first incorrect terms (FITs) of the optimum polynomials fitted
    to the first 1..order values of ``func`` (Project Euler problem 101).
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret
if __name__ == "__main__":
print(f'{solution() = }')
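    # Worked example from the problem statement: for u(n) = n**3 the optimum
    # polynomials give FITs 1, 15 and 58, so the sum is 74.
    assert solution(lambda n: n**3, 3) == 74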
| 136 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
    _import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'''XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLNetForMultipleChoice''',
'''XLNetForQuestionAnswering''',
'''XLNetForQuestionAnsweringSimple''',
'''XLNetForSequenceClassification''',
'''XLNetForTokenClassification''',
'''XLNetLMHeadModel''',
'''XLNetModel''',
'''XLNetPreTrainedModel''',
'''load_tf_weights_in_xlnet''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'''TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLNetForMultipleChoice''',
'''TFXLNetForQuestionAnsweringSimple''',
'''TFXLNetForSequenceClassification''',
'''TFXLNetForTokenClassification''',
'''TFXLNetLMHeadModel''',
'''TFXLNetMainLayer''',
'''TFXLNetModel''',
'''TFXLNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 186 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    image_column_name: Optional[str] = field(
        default=None, metadata={'help': 'The column name of the images in the files.'}
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None


@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'}
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}
    )
    image_processor_name: Optional[str] = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    mask_ratio: float = field(
        default=0.75, metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'}
    )
    norm_pix_loss: bool = field(
        default=True, metadata={'help': 'Whether or not to train with normalized pixel values as target.'}
    )


@dataclass
class CustomTrainingArguments(TrainingArguments):
    base_learning_rate: float = field(
        default=1e-3, metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'}
    )


def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    return {'pixel_values': pixel_values}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
a_ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , A__ , A__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a_ = training_args.get_process_log_level()
logger.setLevel(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
a_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
a_ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
a_ = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A__ ) and data_args.train_val_split > 0.0:
a_ = ds["""train"""].train_test_split(data_args.train_val_split )
a_ = split["""train"""]
a_ = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a_ = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
a_ = ViTMAEConfig.from_pretrained(model_args.config_name , **A__ )
elif model_args.model_name_or_path:
a_ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **A__ )
else:
a_ = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
a_ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **A__ )
elif model_args.model_name_or_path:
a_ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **A__ )
else:
a_ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
a_ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
a_ = ViTMAEForPreTraining(A__ )
if training_args.do_train:
a_ = ds["""train"""].column_names
else:
a_ = ds["""validation"""].column_names
if data_args.image_column_name is not None:
a_ = data_args.image_column_name
elif "image" in column_names:
a_ = """image"""
elif "img" in column_names:
a_ = """img"""
else:
a_ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
a_ = image_processor.size["""shortest_edge"""]
else:
a_ = (image_processor.size["""height"""], image_processor.size["""width"""])
a_ = Compose(
[
            Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(A__ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(A__ ):
a_ = [transforms(A__ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
a_ = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(A__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
a_ = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(A__ )
# Compute absolute learning rate
a_ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
a_ = training_args.base_learning_rate * total_train_batch_size / 2_56
# Initialize our trainer
a_ = Trainer(
model=A__ , args=A__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
a_ = None
if training_args.resume_from_checkpoint is not None:
a_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a_ = last_checkpoint
a_ = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
a_ = trainer.evaluate()
trainer.log_metrics("""eval""" , A__ )
trainer.save_metrics("""eval""" , A__ )
# Write model card and (optionally) push to hub
a_ = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A__ )
else:
trainer.create_model_card(**A__ )
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
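
# Typical invocation (illustrative; assumes the script is saved as run_mae.py,
# and the hyperparameters are only an example):
#
#   python run_mae.py \
#       --dataset_name cifar10 \
#       --output_dir ./vit-mae-demo \
#       --do_train --do_eval \
#       --base_learning_rate 1.5e-4 \
#       --mask_ratio 0.75 \
#       --norm_pix_loss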
| 263 |
'''simple docstring'''
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: print a topological order of `graph`, or report a cycle."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
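

# A reusable variant of the same idea that returns the order instead of
# printing, using collections.deque for O(1) pops (illustrative helper, not
# part of the original script):
from collections import deque


def topological_order(graph: dict[int, list[int]]) -> list[int] | None:
    indegree = {node: 0 for node in graph}
    for neighbours in graph.values():
        for node in neighbours:
            indegree[node] += 1
    queue = deque(node for node, degree in indegree.items() if degree == 0)
    order: list[int] = []
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbour in graph[node]:
            indegree[neighbour] -= 1
            if indegree[neighbour] == 0:
                queue.append(neighbour)
    # None signals that a cycle prevented a complete ordering
    return order if len(order) == len(graph) else None


print(topological_order(graph))  # [0, 1, 2, 3, 4, 5]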
| 665 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
_lowercase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
_lowercase = get_tests_dir("""fixtures/vocab.json""")
_lowercase = get_tests_dir("""fixtures""")
class lowercase_ ( unittest.TestCase ):
__lowerCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Any =0
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Optional[Any] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Dict =WavaVecaConfig()
SCREAMING_SNAKE_CASE_ : Optional[Any] =AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_a )
processor.save_pretrained(_a )
SCREAMING_SNAKE_CASE_ : str =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_a , os.path.join(_a , _a ) )
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> Any:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : int =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : List[str] =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ : List[str] =WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in tokenizer
with open(os.path.join(_a , _a ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Dict =json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : str =WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : List[Any] =AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
SCREAMING_SNAKE_CASE_ : int =WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in feature extractor
with open(os.path.join(_a , _a ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] =json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> List[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Optional[Any] =WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_a )
# copy relevant files
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write('''{}''' )
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _snake_case ( self ) -> Any:
with self.assertRaises(_a ):
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
SCREAMING_SNAKE_CASE_ : List[str] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
SCREAMING_SNAKE_CASE_ : int =processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a , use_fast=_a )
SCREAMING_SNAKE_CASE_ : Tuple =new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _snake_case ( self ) -> List[str]:
try:
AutoConfig.register('''custom''' , _a )
AutoFeatureExtractor.register(_a , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoProcessor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ : Any =CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : str =os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : str =CustomTokenizer(_a )
SCREAMING_SNAKE_CASE_ : List[Any] =CustomProcessor(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_a )
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Optional[Any]:
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
__lowerCamelCase = False
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
__lowerCamelCase = False
class lowercase_ ( __SCREAMING_SNAKE_CASE ):
__lowerCamelCase = "AutoFeatureExtractor"
__lowerCamelCase = "AutoTokenizer"
__lowerCamelCase = False
try:
AutoConfig.register('''custom''' , _a )
AutoFeatureExtractor.register(_a , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoProcessor.register(_a , _a )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE_ : int =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE_ : List[str] =AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _snake_case ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : List[Any] =AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class lowercase_ ( unittest.TestCase ):
__lowerCamelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _snake_case ( cls ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Tuple =TOKEN
HfFolder.save_token(_a )
@classmethod
def _snake_case ( cls ) -> Optional[int]:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _snake_case ( self ) -> str:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a , '''test-processor''' ) , push_to_hub=_a , use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(new_processor.feature_extractor , _a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Dict =WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a , '''test-processor-org''' ) , push_to_hub=_a , use_auth_token=self._token , organization='''valid_org''' , )
SCREAMING_SNAKE_CASE_ : Any =WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(new_processor.feature_extractor , _a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _snake_case ( self ) -> str:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ : Tuple =CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Tuple =os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : int =CustomTokenizer(_a )
SCREAMING_SNAKE_CASE_ : str =CustomProcessor(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor' , token=self._token )
SCREAMING_SNAKE_CASE_ : List[Any] =Repository(_a , clone_from=F'{USER}/test-dynamic-processor' , token=self._token )
processor.save_pretrained(_a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_a , '''tokenizer_config.json''' ) ) as f:
SCREAMING_SNAKE_CASE_ : List[str] =json.load(_a )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_processing.py''' ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE_ : Any =AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor' , trust_remote_code=_a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
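
# The registration flow exercised by these tests, outside a test harness (a
# minimal sketch; the Custom* classes are the fixtures imported at the top of
# this file, and the checkpoint path is illustrative):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
#
#   processor = AutoProcessor.from_pretrained("path/to/custom-checkpoint")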
| 443 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
        # append to next input_ids and attention mask
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
    def prepare_config_and_inputs_for_common( self ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_headmasking = False
    test_pruning = False
def _a ( self : List[Any] ):
'''simple docstring'''
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=LlamaConfig ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
    @unittest.skip(
        """Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
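# A minimal sketch of the rope_scaling configuration exercised by the
# parameterized test above: it is a dict with a "type" ("linear" or
# "dynamic") and a scaling "factor". The tiny model sizes below are
# illustrative assumptions so the sketch stays cheap, not checkpoint values.
from transformers import LlamaConfig, LlamaModel

sketch_config = LlamaConfig(
    vocab_size=99, hidden_size=32, intermediate_size=37, num_hidden_layers=2, num_attention_heads=4
)
sketch_config.rope_scaling = {"type": "dynamic", "factor": 10.0}
# Dynamic scaling leaves inputs shorter than max_position_embeddings untouched,
# which is why the short-input outputs above match only in the "dynamic" case.
sketch_model = LlamaModel(sketch_config)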
| 665 | 0 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
'''simple docstring'''
@staticmethod
        def open( *args , **kwargs ):
pass
@is_pipeline_test
@require_vision
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def _a ( self ):
lowerCAmelCase_: Optional[int] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
lowerCAmelCase_: List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowerCAmelCase_: str = image_classifier(_a , candidate_labels=["a", "b", "c"] )
        # The floating-point scores are so close that we run into floating-point error, and the order is not
        # guaranteed across Python and torch versions.
self.assertIn(
nested_simplify(_a ) , [
[{"score": 0.3_3_3, "label": "a"}, {"score": 0.3_3_3, "label": "b"}, {"score": 0.3_3_3, "label": "c"}],
[{"score": 0.3_3_3, "label": "a"}, {"score": 0.3_3_3, "label": "c"}, {"score": 0.3_3_3, "label": "b"}],
] , )
lowerCAmelCase_: Union[str, Any] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
] , )
@require_tf
def _a ( self ):
lowerCAmelCase_: Optional[int] = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
lowerCAmelCase_: Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowerCAmelCase_: List[Any] = image_classifier(_a , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(_a ) , [{"score": 0.3_3_3, "label": "a"}, {"score": 0.3_3_3, "label": "b"}, {"score": 0.3_3_3, "label": "c"}] , )
lowerCAmelCase_: List[str] = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
[
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
{"score": 0.3_3_3, "label": ANY(_a )},
],
] , )
@slow
@require_torch
def _a ( self ):
lowerCAmelCase_: Dict = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase_: Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowerCAmelCase_: Any = image_classifier(_a , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ) , [
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
] , )
lowerCAmelCase_: List[str] = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
],
]
* 5 , )
@slow
@require_tf
def _a ( self ):
lowerCAmelCase_: Dict = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , framework="tf" )
# This is an image of 2 cats with remotes and no planes
lowerCAmelCase_: str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
lowerCAmelCase_: List[str] = image_classifier(_a , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(_a ) , [
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
] , )
lowerCAmelCase_: Tuple = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(_a ) , [
[
{"score": 0.5_1_1, "label": "remote"},
{"score": 0.4_8_5, "label": "cat"},
{"score": 0.0_0_4, "label": "plane"},
],
]
            * 5 , )
| 613 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTeX.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTeX.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
    def _compute( self ,predictions ,references ):
        '''simple docstring'''
        n_correct = 0.0
        for i, j in zip(predictions ,references ):
            n_correct += 1.0 if math_equivalence.is_equiv(i ,j ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
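# Usage sketch, mirroring the example in the docstring above (it assumes the
# math_equivalence dependency from github.com/hendrycks/math is installed):
import datasets

metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
print(results)  # {'accuracy': 1.0} -- "1/2" is canonicalized to "\frac{1}{2}" before comparison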
| 665 | 0 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
a__ : Any =tmp_path_factory.mktemp("dset_infos_dir" )
if "full:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("---\ndataset_info:\n dataset_size: 42\n---" )
if "empty:README.md" in files:
with open(dataset_infos_dir / "README.md" , "w" ) as f:
f.write("" )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / "dataset_infos.json" , "w" ) as f:
f.write("{\"default\": {\"dataset_size\": 42}}" )
a__ : int =DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def _A ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : DatasetInfo ):
"""simple docstring"""
a__ : Any =str(SCREAMING_SNAKE_CASE )
dataset_info.write_to_directory(SCREAMING_SNAKE_CASE )
a__ : Dict =DatasetInfo.from_directory(SCREAMING_SNAKE_CASE )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , "dataset_info.json" ) )
def _A ( ):
"""simple docstring"""
a__ : Any =DatasetInfo(
description="foo" , citation="bar" , homepage="https://foo.bar" , license="CC0" , features=Features({"a": Value("int32" )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train", "num_examples": 42}] , download_checksums={} , download_size=1_337 , post_processing_size=442 , dataset_size=1_234 , size_in_bytes=1_337 + 442 + 1_234 , )
a__ : Optional[Any] =dataset_info._to_yaml_dict()
assert sorted(SCREAMING_SNAKE_CASE ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
a__ : List[Any] =yaml.safe_dump(SCREAMING_SNAKE_CASE )
a__ : Tuple =yaml.safe_load(SCREAMING_SNAKE_CASE )
assert dataset_info_yaml_dict == reloaded
def _A ( ):
"""simple docstring"""
a__ : Tuple =DatasetInfo()
a__ : Tuple =dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : DatasetInfosDict ):
"""simple docstring"""
a__ : Optional[Any] =str(SCREAMING_SNAKE_CASE )
dataset_infos_dict.write_to_directory(SCREAMING_SNAKE_CASE )
a__ : str =DatasetInfosDict.from_directory(SCREAMING_SNAKE_CASE )
    # the config_name of the dataset_infos_dict takes over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
a__ : List[str] =config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
a__ : Optional[Any] =DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(SCREAMING_SNAKE_CASE , "README.md" ) )
| 563 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
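# Usage sketch against the upstream RetriBertConfig this snippet mirrors
# (RetriBERT is deprecated, so this assumes a transformers version that still
# ships it); the argument values are illustrative, not the checkpoint's.
from transformers import RetriBertConfig

sketch_config = RetriBertConfig(hidden_size=256, num_hidden_layers=4, projection_dim=64)
print(sketch_config.projection_dim)  # 64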
| 665 | 0 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader ( yaml.SafeLoader ):
"""simple docstring"""
    def _check_no_duplicates_on_constructed_node( self, node ) -> None:
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key ) if isinstance(key, list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""" )
    def construct_mapping( self, node, deep=False ) -> dict:
        mapping = super().construct_mapping(node, deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme( readme_content: str ) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('---' ) + 1
        yamlblock = """\n""".join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class DatasetMetadata ( dict ):
"""simple docstring"""
    _FIELDS_WITH_DASHES = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
    def from_readme( cls, path ) -> "DatasetMetadata":
        with open(path, encoding='utf-8' ) as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()
    def to_readme( self, path ) -> None:
        if path.exists():
            with open(path, encoding='utf-8' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content )
        with open(path, 'w', encoding='utf-8' ) as readme_file:
            readme_file.write(updated_readme_content )
    def _to_readme( self, readme_content = None ) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content )
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content
    @classmethod
    def from_yaml_string( cls, string ) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-', '_' ) if key.replace('-', '_' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )
    def to_yaml_string( self ) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_', '-' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            }, sort_keys=False, allow_unicode=True, encoding='utf-8', ).decode('utf-8' )
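# A quick demonstration of the front-matter split implemented by
# _split_yaml_from_readme above: the text between the two leading "---"
# markers comes back as the YAML block, the rest as the body.
_demo_readme = "---\ndataset_info:\n  dataset_size: 42\n---\n# My dataset\n"
_demo_yaml, _demo_body = _split_yaml_from_readme(_demo_readme)
assert _demo_yaml == "dataset_info:\n  dataset_size: 42"
assert _demo_body == "# My dataset"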
SCREAMING_SNAKE_CASE : str = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 294 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    prefix_tokens = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
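# The token-type-id and special-tokens logic above is plain list arithmetic;
# a standalone sketch with made-up ids shows the shapes it produces for a
# single sequence wrapped as [CLS] tok tok [SEP]:
demo_ids = [2023, 2003]                     # illustrative ids, not real BigBird ids
special_tokens_mask = [1] + ([0] * len(demo_ids)) + [1]
token_type_ids = (len(demo_ids) + 2) * [0]  # one segment -> all zeros
print(special_tokens_mask, token_type_ids)  # [1, 0, 0, 1] [0, 0, 0, 0]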
| 665 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class SCREAMING_SNAKE_CASE ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
def __init__( self : str , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Dict=True , lowerCAmelCase : List[str]="[UNK]" , lowerCAmelCase : Any="[SEP]" , lowerCAmelCase : str="[PAD]" , lowerCAmelCase : List[Any]="[CLS]" , lowerCAmelCase : List[str]="[MASK]" , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Any=None , **lowerCAmelCase : Optional[int] , ) -> List[Any]:
"""simple docstring"""
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , tokenize_chinese_chars=_a , strip_accents=_a , **_a , )
__lowerCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _a ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _a ) != tokenize_chinese_chars
):
__lowerCAmelCase : Dict = getattr(_a , normalizer_state.pop("""type""" ) )
__lowerCAmelCase : str = do_lower_case
__lowerCAmelCase : Any = strip_accents
__lowerCAmelCase : int = tokenize_chinese_chars
__lowerCAmelCase : Tuple = normalizer_class(**_a )
__lowerCAmelCase : Any = do_lower_case
def SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any=None ) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase : List[int] , lowerCAmelCase : Optional[List[int]] = None ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = [self.sep_token_id]
__lowerCAmelCase : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self._tokenizer.model.save(_a , name=_a )
return tuple(_a )
| 651 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 | 0 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A__ ( Pipeline ):
def __init__( self , *__magic_name__ , **__magic_name__ ):
super().__init__(*_a , **_a )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCamelCase__ ( self , __magic_name__=None , __magic_name__=None , __magic_name__=None ):
lowerCamelCase : int = {}
lowerCamelCase : Optional[Any] = {}
if prompt is not None:
lowerCamelCase : Any = prompt
if generate_kwargs is not None:
lowerCamelCase : Any = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
lowerCamelCase : Any = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
lowerCamelCase : Tuple = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , __magic_name__ , **__magic_name__ ):
return super().__call__(_a , **_a )
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=None ):
lowerCamelCase : Optional[int] = load_image(_a )
if prompt is not None:
if not isinstance(_a , _a ):
raise ValueError(
F'''Received an invalid text input, got - {type(_a )} - but expected a single string. '''
"""Note also that one single text can be provided for conditional image to text generation.""" )
lowerCamelCase : Optional[Any] = self.model.config.model_type
if model_type == "git":
lowerCamelCase : Optional[Any] = self.image_processor(images=_a , return_tensors=self.framework )
lowerCamelCase : Any = self.tokenizer(text=_a , add_special_tokens=_a ).input_ids
lowerCamelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
lowerCamelCase : List[str] = torch.tensor(_a ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
lowerCamelCase : Optional[Any] = self.image_processor(images=_a , header_text=_a , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
lowerCamelCase : Dict = self.image_processor(images=_a , return_tensors=self.framework )
lowerCamelCase : Any = self.tokenizer(_a , return_tensors=self.framework )
model_inputs.update(_a )
else:
raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
else:
lowerCamelCase : List[str] = self.image_processor(images=_a , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
lowerCamelCase : List[Any] = None
return model_inputs
def UpperCamelCase__ ( self , __magic_name__ , __magic_name__=None ):
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , _a )
and all(x is None for x in model_inputs["""input_ids"""] )
):
lowerCamelCase : Tuple = None
if generate_kwargs is None:
lowerCamelCase : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
lowerCamelCase : Dict = model_inputs.pop(self.model.main_input_name )
lowerCamelCase : Dict = self.model.generate(_a , **_a , **_a )
return model_outputs
def UpperCamelCase__ ( self , __magic_name__ ):
lowerCamelCase : Optional[int] = []
for output_ids in model_outputs:
lowerCamelCase : Optional[Any] = {
"""generated_text""": self.tokenizer.decode(
_a , skip_special_tokens=_a , )
}
records.append(_a )
return records
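# Usage sketch for the pipeline class above. The checkpoint name is an
# illustrative assumption, and the image path is a placeholder; `prompt` is
# only honored by model types that support conditional generation
# (e.g. GIT, Pix2Struct), as the preprocess logic above shows.
from transformers import pipeline

captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
print(captioner("path/to/image.png"))  # [{'generated_text': '...'}]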
| 681 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
    attribute_map = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
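# Sketch of what the attribute_map above buys you (run against the upstream
# GPTBigCodeConfig this snippet mirrors): canonical names such as hidden_size
# resolve to the GPT-style fields, so generic code can read the config
# without knowing the n_embd/n_layer naming.
from transformers import GPTBigCodeConfig

sketch_config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=8)
assert sketch_config.hidden_size == 256       # alias for n_embd via attribute_map
assert sketch_config.num_hidden_layers == 4   # alias for n_layer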
| 665 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , """schedule.bin""" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual( self , list1 , list2 , tol ) -> None:
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
def snake_case_( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_a )
_SCREAMING_SNAKE_CASE = torch.tensor([0.4, 0.2, -0.5] )
_SCREAMING_SNAKE_CASE = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_SCREAMING_SNAKE_CASE = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(100 ):
_SCREAMING_SNAKE_CASE = criterion(_a , _a )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_a )
_SCREAMING_SNAKE_CASE = torch.tensor([0.4, 0.2, -0.5] )
_SCREAMING_SNAKE_CASE = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_SCREAMING_SNAKE_CASE = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_a , weight_decay=0.0 , relative_step=_a , scale_parameter=_a , warmup_init=_a , )
for _ in range(1000 ):
_SCREAMING_SNAKE_CASE = criterion(_a , _a )
loss.backward()
optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors; we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ) -> None:
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
        # scheduler dict format
# function: (sched_args_dict, expected_learning_rates)
_SCREAMING_SNAKE_CASE = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
_SCREAMING_SNAKE_CASE = data
_SCREAMING_SNAKE_CASE = scheduler_func(self.optimizer , **_a )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_SCREAMING_SNAKE_CASE = unwrap_schedule(_a , self.num_steps )
self.assertListAlmostEqual(
_a , _a , tol=1e-2 , msg=f'failed for {scheduler_func} in normal scheduler' , )
_SCREAMING_SNAKE_CASE = scheduler_func(self.optimizer , **_a )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_a ) # wrap to test picklability of the schedule
_SCREAMING_SNAKE_CASE = unwrap_and_save_reload_schedule(_a , self.num_steps )
self.assertListEqual(_a , _a , msg=f'failed for {scheduler_func} in save and reload' )
class LambdaScheduleWrapper:
    '''simple docstring'''

    def __init__( self , fn ) -> None:
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ) -> None:
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
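# A short sketch of the warmup-then-decay shape encoded by the expected LR
# lists above: the LR climbs linearly for num_warmup_steps, then decays
# linearly to zero over the remaining steps.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
print(unwrap_schedule(scheduler, num_steps=10))
# ~[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]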
| 314 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
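# Tiny demonstration of the two helpers above: bytes_to_unicode builds a
# reversible byte -> printable-character map, and get_pairs lists the
# adjacent symbol pairs that BPE considers merging.
byte_encoder = bytes_to_unicode()
print(len(byte_encoder))          # 256 -- every byte value maps to a printable character
print(get_pairs(tuple("hello")))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}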
class __lowerCAmelCase ( PreTrainedTokenizer ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.encoder)

    def get_vocab(self):
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        '''simple docstring'''
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
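# Hedged usage sketch (added; assumes the upstream `transformers` package and a
# network connection, hence left commented out):
#   from transformers import LongformerTokenizer
#   tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
#   tokenizer.tokenize("Hello world")          # byte-level BPE pieces, e.g. ['Hello', 'Ġworld']
#   tokenizer("Hello world")["input_ids"]      # [0, 31414, 232, 2] — <s> ... </s>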
| 665 | 0 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        '''simple docstring'''
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"
    def get_tokenizer(self, **kwargs_init):
        '''simple docstring'''
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        '''simple docstring'''
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_feature_extractor()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
processor.save_pretrained(self.tmpdirname )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _a )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowercase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_a , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_a , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
__lowercase = floats_list((3, 10_00) )
__lowercase = feature_extractor(_a , return_tensors='''np''' )
__lowercase = processor(_a , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
__lowercase = """This is a test string"""
__lowercase = processor(text=_a )
__lowercase = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        '''simple docstring'''
        np.random.seed(seed)
        return np.random.rand(*shape)
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
__lowercase = self._get_dummy_logits(shape=(10, 16) , seed=13 )
__lowercase = processor.decode(_a )
__lowercase = decoder.decode_beams(_a )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
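    # (added note) the Wav2Vec2-with-LM output mirrors pyctcdecode's top beam:
    # beam[0] is the transcription, beam[-2] the raw logit score and beam[-1] the
    # LM-fused score, which is exactly what the four assertions above compare.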
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def test_decoder_batch(self, pool_context):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
__lowercase = self._get_dummy_logits()
__lowercase = 15
__lowercase = -20.0
__lowercase = -4.0
__lowercase = processor.batch_decode(
_a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
__lowercase = decoded_processor_out.text
__lowercase = list(_a )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_a , _a , beam_width=_a , beam_prune_logp=_a , token_min_logp=_a , )
__lowercase = [d[0][0] for d in decoded_decoder_out]
__lowercase = [d[0][2] for d in decoded_decoder_out]
__lowercase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _a )
self.assertTrue(np.array_equal(_a , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _a , atol=1E-3 ) )
self.assertTrue(np.array_equal(_a , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _a , atol=1E-3 ) )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
__lowercase = self._get_dummy_logits()
__lowercase = 2.0
__lowercase = 5.0
__lowercase = -20.0
__lowercase = True
__lowercase = processor.batch_decode(
_a , alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
__lowercase = decoded_processor_out.text
__lowercase = list(_a )
decoder.reset_params(
alpha=_a , beta=_a , unk_score_offset=_a , lm_score_boundary=_a , )
with get_context('''fork''' ).Pool() as pool:
__lowercase = decoder.decode_beams_batch(
_a , _a , )
__lowercase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_a , _a )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _a )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _a )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_a )
__lowercase = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_a , _a )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = snapshot_download('''hf-internal-testing/processor_with_lm''' )
__lowercase = WavaVecaProcessorWithLM.from_pretrained(_a )
__lowercase = processor.decoder.model_container[processor.decoder._model_key]
__lowercase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
__lowercase = os.listdir(_a )
__lowercase = os.listdir(_a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_a , _a )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = floats_list((3, 10_00) )
__lowercase = processor_wavaveca(_a , return_tensors='''np''' )
__lowercase = processor_auto(_a , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
__lowercase = self._get_dummy_logits()
__lowercase = processor_wavaveca.batch_decode(_a )
__lowercase = processor_auto.batch_decode(_a )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.get_feature_extractor()
__lowercase = self.get_tokenizer()
__lowercase = self.get_decoder()
__lowercase = WavaVecaProcessorWithLM(tokenizer=_a , feature_extractor=_a , decoder=_a )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
    def get_from_offsets(offsets, key):
        '''simple docstring'''
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()[0]
__lowercase = processor.decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
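    # (added note) start/end offsets are measured in model frames; the slow
    # integration test below multiplies them by inputs_to_logits_ratio /
    # sampling_rate to convert them to seconds.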
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
__lowercase = self._get_dummy_logits()
__lowercase = processor.batch_decode(_a , output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_a , _a ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_a , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
import torch
__lowercase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_a )
__lowercase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_60_00 ) )
__lowercase = iter(_a )
__lowercase = next(_a )
__lowercase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
__lowercase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowercase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
__lowercase = model(_a ).logits.cpu().numpy()
__lowercase = processor.decode(logits[0] , output_word_offsets=_a )
__lowercase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowercase = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
__lowercase = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , _a )
self.assertEqual(''' '''.join(self.get_from_offsets(_a , '''word''' ) ) , output.text )
# output times
__lowercase = torch.tensor(self.get_from_offsets(_a , '''start_time''' ) )
__lowercase = torch.tensor(self.get_from_offsets(_a , '''end_time''' ) )
# fmt: off
__lowercase = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
__lowercase = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) )
self.assertTrue(torch.allclose(_a , _a , atol=0.01 ) ) | 534 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
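# Hedged usage sketch (added; assumes the upstream `transformers` package and a
# network connection, hence left commented out):
#   from transformers import ConvBertTokenizerFast
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("hello", "world")   # pair encoding: [CLS] A [SEP] B [SEP]
#   enc["token_type_ids"]         # 0s for segment A, 1s for segment B (see above)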
| 665 | 0 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """simple docstring"""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """simple docstring"""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """simple docstring"""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
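# Illustrative run (added; not in the original file): two alternatives scored on
# price (weight 0: lower is better) and rating (weight 1: higher is better).
if __name__ == "__main__":
    demo = [[20.0, 3.0], [10.0, 5.0]]
    print(procentual_proximity(demo, [0, 1]))
    # -> [[20.0, 3.0, 0.0], [10.0, 5.0, 2.0]]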
| 136 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs,)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self):
        '''simple docstring'''
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        '''simple docstring'''
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        '''simple docstring'''
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
                "to use it with pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
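# Hedged usage sketch (added; assumes the upstream `transformers` package and a
# network connection, hence left commented out):
#   from transformers import BartTokenizerFast
#   tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   tok("Hello world")["input_ids"]   # [0, 31414, 232, 2] — same byte-level BPE as RoBERTa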
| 665 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
UpperCAmelCase__ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""")
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.')[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                    continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """simple docstring"""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config)
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config)
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
UpperCAmelCase__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
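# Example invocation (added; script name and paths are placeholders):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --config_path ./config.json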
| 186 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
    # TapasConfig to False.
    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(F'Task {task} not supported.')
    print(F'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print("""Used relative position embeddings:""", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to True.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
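# Example invocation (added; paths are placeholders):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./model.ckpt \
#       --tapas_config_file ./tapas_config.json \
#       --pytorch_dump_path ./tapas-wtq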
| 665 | 0 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
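# Illustrative check (added; not in the original file): every edge of the demo
# graph is covered by the returned set — the maximal-matching heuristic gives a
# 2-approximation of the minimum vertex cover.
def _vertex_cover_demo() -> set:
    demo_graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    cover = matching_min_vertex_cover(demo_graph)
    assert all(u in cover or v in cover for u, v in get_edges(demo_graph))
    return cover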
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 263 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    '''simple docstring'''

    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNetaDConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler],):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)
    def get_default_steps(self):
        '''simple docstring'''
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True,):
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device,)
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNetaDConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]
            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator,)["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator,)["prev_sample"]
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]
        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))
    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50):
        '''simple docstring'''
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float):
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
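# Hedged usage sketch (added; `pipe` stands for a hypothetical loaded pipeline instance):
#   noise_a = torch.randn(1, 1, 256, 256)
#   noise_b = torch.randn(1, 1, 256, 256)
#   halfway = pipe.slerp(noise_a, noise_b, 0.5)   # alpha=0 -> noise_a, alpha=1 -> noise_b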
| 665 | 0 |