'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
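# --- Added example (not part of the original script) ---
# A minimal sketch of the renaming the maps above encode. Note that
# convert_unet_state_dict itself expects a *complete* UNet state dict (it
# unconditionally adds every pair from unet_conversion_map), so this demo
# applies the two relevant maps by hand to a single illustrative key.
def _demo_unet_key_renaming():
    hf_key = "down_blocks.0.resnets.0.norm1.weight"
    sd_key = hf_key
    for sd_part, hf_part in unet_conversion_map_resnet + unet_conversion_map_layer:
        sd_key = sd_key.replace(hf_part, sd_part)
    # "norm1" -> "in_layers.0", then "down_blocks.0.resnets.0." -> "input_blocks.1.0."
    assert sd_key == "input_blocks.1.0.in_layers.0.weight"
    return sd_key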
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
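# --- Added example (not part of the original module) ---
# A minimal usage sketch, assuming access to the HuggingFace Hub:
#
#   from transformers import ConvBertTokenizerFast
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tokenizer("hello world", return_tensors="pt")
#   # enc["input_ids"] starts with [CLS] and ends with [SEP],
#   # as built by build_inputs_with_special_tokens above.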
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BART tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2 tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
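# --- Added example (not part of the original module) ---
# A minimal usage sketch, assuming access to the HuggingFace Hub:
#
#   from transformers import BartTokenizerFast
#   tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
#   enc = tokenizer("Hello world", return_tensors="pt")
#   # single sequences are wrapped as <s> ... </s>
#   # (see build_inputs_with_special_tokens above)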
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
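# --- Added example (not part of the original script) ---
# A minimal sketch, assuming the teacher weights can be fetched from the Hub:
#
#   student, e_copied, d_copied = create_student_by_copying_alternating_layers(
#       "facebook/bart-large-cnn", save_path="student-bart-6-3", e=6, d=3
#   )
#   # student is a 6-encoder / 3-decoder BART initialised from teacher layers
#   # [0, 2, 4, 7, 9, 11] and [0, 6, 11] respectively (see LAYERS_TO_COPY[12]).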
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to True.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained TAPAS model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
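# --- Added example (not part of the original script) ---
# A hypothetical invocation (script name and paths are placeholders):
#
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/bert_config.json \
#       --pytorch_dump_path /path/to/output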
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences and rejoin them with newline separators."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
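# --- Added example (not part of the original module) ---
# A small demo; requires nltk's punkt data (downloaded above when available).
def _demo_sentence_split():
    text = "Hello world. <n> This is a test."
    # "<n>" is removed first, then punkt splits the remaining two sentences,
    # so the result is "Hello world.\nThis is a test."
    return add_newline_to_end_of_each_sentence(text)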
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the denoising step process: recover the noisy image that generates an image."""
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation between two noise tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
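# --- Added example (not part of the original module) ---
# A minimal usage sketch; the model id is an assumption (a community
# audio-diffusion checkpoint) and weights must be available on the Hub:
#
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#   output = pipe(batch_size=1)
#   image = output.images[0]                       # mel spectrogram as a PIL image
#   sample_rate, audio = pipe.mel.get_sample_rate(), output.audios[0]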
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
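# --- Added example (not part of the original script) ---
# A hypothetical invocation (script name and paths are placeholders):
#
#   python convert_gpt2_checkpoint.py \
#       --gpt2_checkpoint_path /path/to/model.ckpt \
#       --pytorch_dump_folder_path /path/to/output \
#       --gpt2_config_file /path/to/config.json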
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
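# --- Added example (not part of the original script) ---
# Typical ways to run this script (the script name is illustrative):
#
#   python nlp_example.py                                  # single CPU / single GPU
#   accelerate launch nlp_example.py                       # after running `accelerate config`
#   accelerate launch --mixed_precision fp16 nlp_example.py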
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of a WavLM model."""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
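# --- Added example (not part of the original module) ---
# A small sketch of the stride product exposed by `inputs_to_logits_ratio`:
def _demo_inputs_to_logits_ratio():
    config = WavLMConfig()
    # default conv_stride is (5, 2, 2, 2, 2, 2, 2), so each output frame
    # consumes 5 * 2**6 = 320 input samples
    assert config.inputs_to_logits_ratio == 320
    return config.inputs_to_logits_ratio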
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """
    Return the minimum cost to travel on every day in `days`, given the
    prices of a 1-day, 7-day and 30-day pass in `costs`.
    """
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
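# --- Added example (not part of the original module) ---
# Worked example: travel days [1, 4, 6, 7, 8, 20] with pass costs [2, 7, 25].
# Day 1 takes a 1-day pass (2), days 4-8 fit in one 7-day pass (7), and day 20
# takes another 1-day pass (2), for a minimum total of 11.
def _demo_mincost_tickets():
    assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 25]) == 11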
'''simple docstring'''
from math import sqrt
def is_prime(number: int) -> bool:
    """Checks whether a number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Returns the nth prime number."""
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
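# --- Added example (not part of the original module) ---
# Quick sanity checks: the 6th prime is 13 and the 10001st prime is 104743.
def _demo_solution():
    assert solution(6) == 13
    assert solution() == 104743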
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve A x = b with the Jacobi iteration method."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Check whether the augmented matrix's coefficient part is strictly diagonally dominant."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
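# --- Added example (not part of the original module) ---
# Worked example on a strictly diagonally dominant 3x3 system
# (row sums of off-diagonal entries are 2, 3 and 3, each below the diagonal).
def _demo_jacobi():
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    init_val = [0.5, -0.5, -0.5]
    # Three Jacobi sweeps move the iterate toward the true solution of A x = b.
    return jacobi_iteration_method(coefficient, constant, init_val, iterations=3)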
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaImgaImgPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """image"""]
a_ = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a_ = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : str ):
'''simple docstring'''
return 32
@property
def _a ( self : List[str] ):
'''simple docstring'''
return 32
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : int ):
'''simple docstring'''
return 100
@property
def _a ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[Any] = {
"""in_channels""": 4,
# Out channels is double the in channels because the model predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Dict = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _a ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = self.dummy_unet
A_ : Optional[Any] = self.dummy_movq
A_ : Dict = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
A_ : int = DDIMScheduler(**_a )
A_ : List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Any ,_a : Tuple ,_a : List[Any]=0 ):
'''simple docstring'''
A_ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create init_image
A_ : List[str] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
A_ : str = image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Any = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((256, 256) )
if str(_a ).startswith("""mps""" ):
A_ : Optional[int] = torch.manual_seed(_a )
else:
A_ : Tuple = torch.Generator(device=_a ).manual_seed(_a )
A_ : int = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[int] = """cpu"""
A_ : Tuple = self.get_dummy_components()
A_ : List[Any] = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Optional[int] = output.images
A_ : Dict = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : List[Any] = image[0, -3:, -3:, -1]
A_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : Tuple = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
A_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
A_ : Tuple = """A red cartoon frog, 4k"""
A_ : List[str] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Tuple = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" ,torch_dtype=torch.floataa )
A_ : Any = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ , A_ : Optional[int] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : str = pipeline(
image=_a ,image_embeds=_a ,negative_image_embeds=_a ,generator=_a ,num_inference_steps=100 ,height=768 ,width=768 ,strength=0.2 ,output_type="""np""" ,)
A_ : Tuple = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(_a ,_a )
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Any = len(lowerCamelCase)
A_ : Optional[Any] = len(lowerCamelCase)
A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)]
A_ : Union[str, Any] = True
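# dp[i][j] is True when the first i characters of a can be turned into the first j
# characters of b by uppercasing some lowercase letters and deleting the rest.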
for i in range(lowerCamelCase):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A_ : Optional[int] = True
if a[i].islower():
A_ : List[Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__magic_name__ = (3, 9, -11, 0, 7, 5, 1, -1)
__magic_name__ = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] ,_a : Iterable[int] ):
'''simple docstring'''
A_ : Node | None = None
for i in sorted(_a ,reverse=_a ):
A_ : List[str] = Node(_a ,self.head )
def __iter__( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[str] = self.head
while node:
yield node.data
A_ : Tuple = node.next_node
def __len__( self : Any ):
'''simple docstring'''
return sum(1 for _ in self )
def __str__( self : Tuple ):
'''simple docstring'''
return " -> ".join([str(_a ) for node in self] )
def lowerCamelCase ( lowerCamelCase : SortedLinkedList , lowerCamelCase : SortedLinkedList):
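# The constructor re-sorts the concatenated values, so the merged list stays ordered.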
return SortedLinkedList(list(lowerCamelCase) + list(lowerCamelCase))
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
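# 0-1 BFS: zero-weight edges go to the front of the deque and unit-weight edges to
# the back, so vertices are popped in nondecreasing distance order.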
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
from math import ceil, sqrt
def lowerCamelCase ( lowerCamelCase : int = 100_0000):
A_ : List[str] = 0
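# A lamina with outer width w and hole width h uses w**2 - h**2 tiles; for each w,
# count the hole widths of matching parity that keep the tile count within the limit.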
for outer_width in range(3 , (limit // 4) + 2):
if outer_width**2 > limit:
A_ : int = max(ceil(sqrt(outer_width**2 - limit)) , 1)
else:
A_ : List[str] = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 | 1 |
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def lowerCamelCase ( lowerCamelCase : Dict):
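# Format a number of seconds as hours:minutes:seconds, dropping the hour field when it is zero.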
A_ : Tuple = int(lowerCamelCase)
A_ , A_ , A_ : int = t // 3600, (t // 60) % 60, t % 60
return F'{h}:{m:02d}:{s:02d}' if h != 0 else F'{m:02d}:{s:02d}'
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Any=300):
# docstyle-ignore
return F'\n <div>\n {prefix}\n <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>\n {label}\n </div>\n '
def lowerCamelCase ( lowerCamelCase : Optional[int]):
A_ : List[Any] = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F' <th>{i}</th>\n'
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
A_ : Dict = F'{elt:.6f}' if isinstance(lowerCamelCase , lowerCamelCase) else str(lowerCamelCase)
html_code += F' <td>{elt}</td>\n'
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class __lowerCAmelCase :
'''simple docstring'''
a_ = 5
a_ = 0.2
def __init__( self : str ,_a : int ,_a : Optional[str] = None ,_a : bool = True ,_a : Optional["NotebookTrainingTracker"] = None ,_a : int = 300 ,):
'''simple docstring'''
A_ : List[Any] = total
A_ : List[Any] = """""" if prefix is None else prefix
A_ : Any = leave
A_ : List[str] = parent
A_ : List[str] = width
A_ : Dict = None
A_ : Tuple = None
A_ : Tuple = None
def _a ( self : List[str] ,_a : int ,_a : bool = False ,_a : str = None ):
'''simple docstring'''
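# Throttle redraws: always render during the first `warmup` calls, then only after
# `wait_for` items, sized so the bar refreshes roughly every `update_every` seconds.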
A_ : int = value
if comment is not None:
A_ : str = comment
if self.last_value is None:
A_ : Optional[int] = time.time()
A_ : Optional[Any] = value
A_ : Any = None
A_ : int = self.warmup
A_ : List[str] = 1
self.update_bar(_a )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for ,self.total ):
if self.first_calls > 0:
self.first_calls -= 1
A_ : List[Any] = time.time()
A_ : Optional[Any] = current_time - self.start_time
# We could have value = self.start_value if the update is called twice with the same start value.
if value > self.start_value:
A_ : int = self.elapsed_time / (value - self.start_value)
else:
A_ : List[Any] = None
if value >= self.total:
A_ : List[Any] = self.total
A_ : List[Any] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
A_ : Optional[Any] = self.average_time_per_item * (self.total - value)
self.update_bar(_a )
A_ : Dict = value
A_ : Dict = current_time
if self.average_time_per_item is None:
A_ : Optional[int] = 1
else:
A_ : Any = max(int(self.update_every / self.average_time_per_item ) ,1 )
def _a ( self : Optional[Any] ,_a : Tuple ,_a : Union[str, Any]=None ):
'''simple docstring'''
A_ : Any = """ """ * (len(str(self.total ) ) - len(str(_a ) )) + str(_a )
if self.elapsed_time is None:
A_ : List[str] = f'[{spaced_value}/{self.total} : < :'
elif self.predicted_remaining is None:
A_ : int = f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'
else:
A_ : List[str] = (
f'[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'
f' {format_time(self.predicted_remaining )}'
)
self.label += f', {1/self.average_time_per_item:.2f} it/s'
self.label += "]" if self.comment is None or len(self.comment ) == 0 else f', {self.comment}]'
self.display()
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
A_ : List[Any] = disp.display(disp.HTML(self.html_code ) ,display_id=_a )
else:
self.output.update(disp.HTML(self.html_code ) )
def _a ( self : int ):
'''simple docstring'''
if self.parent is None and self.output is not None:
self.output.update(disp.HTML("""""" ) )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] ,_a : Dict ,_a : Union[str, Any]=None ):
'''simple docstring'''
super().__init__(_a )
A_ : Optional[int] = None if column_names is None else [column_names]
A_ : str = None
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = html_progress_bar(self.value ,self.total ,self.prefix ,self.label ,self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
A_ : Optional[Any] = disp.display(disp.HTML(self.html_code ) ,display_id=_a )
else:
self.output.update(disp.HTML(self.html_code ) )
def _a ( self : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
if self.inner_table is None:
A_ : List[str] = [list(values.keys() ), list(values.values() )]
else:
A_ : int = self.inner_table[0]
if len(self.inner_table ) == 1:
# Give the column names a chance to be updated on the first iteration
for key in values.keys():
if key not in columns:
columns.append(_a )
A_ : Optional[int] = columns
self.inner_table.append([values[c] for c in columns] )
def _a ( self : Dict ,_a : Optional[int] ,_a : Optional[Any]=None ,_a : int=300 ):
'''simple docstring'''
A_ : Any = NotebookProgressBar(_a ,prefix=_a ,parent=self ,width=_a )
return self.child_bar
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = None
self.display()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ):
'''simple docstring'''
A_ : int = None
A_ : Optional[int] = None
A_ : Tuple = False
def _a ( self : Optional[Any] ,_a : str ,_a : Optional[Any] ,_a : Dict ,**_a : List[Any] ):
'''simple docstring'''
A_ : Tuple = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
A_ : Any = 0
A_ : List[Any] = 0
A_ : str = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""" )
A_ : int = NotebookTrainingTracker(state.max_steps ,_a )
def _a ( self : Any ,_a : Any ,_a : str ,_a : Dict ,**_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[int] = int(state.epoch ) if int(state.epoch ) == state.epoch else f'{state.epoch:.2f}'
self.training_tracker.update(
state.global_step + 1 ,comment=f'Epoch {epoch}/{state.num_train_epochs}' ,force_update=self._force_next_update ,)
A_ : int = False
def _a ( self : Optional[Any] ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : List[Any]=None ,**_a : str ):
'''simple docstring'''
if not has_length(_a ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
A_ : str = self.training_tracker.add_child(len(_a ) )
else:
A_ : List[Any] = NotebookProgressBar(len(_a ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def _a ( self : Tuple ,_a : Optional[int] ,_a : int ,_a : Union[str, Any] ,**_a : Dict ):
'''simple docstring'''
if self.prediction_bar is not None:
self.prediction_bar.close()
A_ : str = None
def _a ( self : Tuple ,_a : List[str] ,_a : Any ,_a : List[str] ,_a : int=None ,**_a : List[Any] ):
'''simple docstring'''
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
A_ : Optional[Any] = {"""Training Loss""": logs["""loss"""]}
# First column is necessarily Step since we're not in the epoch eval strategy
A_ : List[Any] = state.global_step
self.training_tracker.write_line(_a )
def _a ( self : Optional[Any] ,_a : str ,_a : Tuple ,_a : Optional[int] ,_a : Optional[Any]=None ,**_a : Dict ):
'''simple docstring'''
if self.training_tracker is not None:
A_ : int = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history ):
if "loss" in log:
A_ : List[str] = log["""loss"""]
break
if self.first_column == "Epoch":
A_ : List[Any] = int(state.epoch )
else:
A_ : Tuple = state.global_step
A_ : int = """eval"""
for k in metrics:
if k.endswith("""_loss""" ):
A_ : List[str] = re.sub(r"""\_loss$""" ,"""""" ,_a )
A_ : str = metrics.pop("""total_flos""" ,_a )
A_ : List[str] = metrics.pop("""epoch""" ,_a )
A_ : Optional[int] = metrics.pop(f'{metric_key_prefix}_runtime' ,_a )
A_ : List[str] = metrics.pop(f'{metric_key_prefix}_samples_per_second' ,_a )
A_ : Union[str, Any] = metrics.pop(f'{metric_key_prefix}_steps_per_second' ,_a )
A_ : str = metrics.pop(f'{metric_key_prefix}_jit_compilation_time' ,_a )
for k, v in metrics.items():
if k == f'{metric_key_prefix}_loss':
A_ : str = v
else:
A_ : List[Any] = k.split("""_""" )
A_ : Union[str, Any] = """ """.join([part.capitalize() for part in splits[1:]] )
A_ : Tuple = v
self.training_tracker.write_line(_a )
self.training_tracker.remove_child()
A_ : Optional[Any] = None
# Evaluation takes a long time so we should force the next update.
A_ : Optional[int] = True
def _a ( self : Optional[int] ,_a : Optional[Any] ,_a : Optional[int] ,_a : int ,**_a : List[Any] ):
'''simple docstring'''
self.training_tracker.update(
state.global_step ,comment=f'Epoch {int(state.epoch )}/{state.num_train_epochs}' ,force_update=_a )
A_ : Optional[int] = None
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10 , lowerCamelCase : int = 22):
A_ : Union[str, Any] = range(1 , lowerCamelCase)
A_ : List[str] = range(1 , lowerCamelCase)
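# The base must be below 10 (10**n already has n + 1 digits), and 9**22 has only
# 21 digits, so powers beyond 21 contribute nothing.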
return sum(
1 for power in powers for base in bases if len(str(base**power)) == power)
if __name__ == "__main__":
print(f"""{solution(10, 22) = }""")
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
__magic_name__ = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Any):
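# Walk the dotted key to locate the target parameter on the HF model, check that the
# shapes match, then copy the fairseq value into the corresponding weight slot.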
for attribute in key.split("""."""):
A_ : Optional[Any] = getattr(lowerCamelCase , lowerCamelCase)
if weight_type is not None:
A_ : Any = getattr(lowerCamelCase , lowerCamelCase).shape
else:
A_ : Optional[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}')
if weight_type == "weight":
A_ : Union[str, Any] = value
elif weight_type == "weight_g":
A_ : List[str] = value
elif weight_type == "weight_v":
A_ : str = value
elif weight_type == "bias":
A_ : int = value
else:
A_ : Dict = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : int):
A_ : str = []
A_ : List[Any] = fairseq_model.state_dict()
A_ : Optional[Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A_ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
A_ : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
A_ : Optional[Any] = """unispeech_sat.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""")[-1] == name.split(""".""")[0]:
if "layer_norm_for_extract" in name and (".".join(name.split(""".""")[:-1]) != key):
# special case since naming is very similar
continue
A_ : List[Any] = True
if "*" in mapped_key:
A_ : Optional[Any] = name.split(lowerCamelCase)[0].split(""".""")[-2]
A_ : Optional[Any] = mapped_key.replace("""*""" , lowerCamelCase)
if "weight_g" in name:
A_ : Tuple = """weight_g"""
elif "weight_v" in name:
A_ : Optional[int] = """weight_v"""
elif "bias" in name:
A_ : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A_ : str = """weight"""
else:
A_ : Dict = None
set_recursively(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
continue
if not is_used:
unused_weights.append(lowerCamelCase)
logger.warning(F'Unused weights: {unused_weights}')
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : int , lowerCamelCase : List[str] , lowerCamelCase : Dict):
A_ : List[str] = full_name.split("""conv_layers.""")[-1]
A_ : List[str] = name.split(""".""")
A_ : int = int(items[0])
A_ : Any = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.')
A_ : int = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.')
A_ : Union[str, Any] = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.')
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.')
A_ : Union[str, Any] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.')
A_ : List[str] = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
else:
unused_weights.append(lowerCamelCase)
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Any , lowerCamelCase : Dict=None , lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[int]=True):
if config_path is not None:
A_ : Optional[int] = UniSpeechSatConfig.from_pretrained(lowerCamelCase)
else:
A_ : Optional[int] = UniSpeechSatConfig()
A_ : List[Any] = """"""
if is_finetuned:
A_ : Optional[Any] = UniSpeechSatForCTC(lowerCamelCase)
else:
A_ : Optional[Any] = UniSpeechSatForPreTraining(lowerCamelCase)
A_ , A_ , A_ : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""")[:-1])})
A_ : List[str] = model[0].eval()
recursively_load_weights(lowerCamelCase , lowerCamelCase)
hf_wavavec.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__magic_name__ = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str ,**_a : Optional[Any] ):
'''simple docstring'''
requires_backends(self ,["""bs4"""] )
super().__init__(**_a )
def _a ( self : Tuple ,_a : List[Any] ):
'''simple docstring'''
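# Walk up the parse tree, collecting each ancestor's tag name and its 1-based position
# among same-named siblings (0 when it is the only one); together these form the XPath.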
A_ : Union[str, Any] = []
A_ : Tuple = []
A_ : Tuple = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
A_ : Optional[Any] = parent.find_all(child.name ,recursive=_a )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_a ) else next(i for i, s in enumerate(_a ,1 ) if s is child ) )
A_ : Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def _a ( self : Any ,_a : str ):
'''simple docstring'''
A_ : str = BeautifulSoup(_a ,"""html.parser""" )
A_ : Optional[int] = []
A_ : List[Any] = []
A_ : Dict = []
for element in html_code.descendants:
if type(_a ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
A_ : int = html.unescape(_a ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_a )
A_ , A_ : Optional[int] = self.xpath_soup(_a )
stringaxtag_seq.append(_a )
stringaxsubs_seq.append(_a )
if len(_a ) != len(_a ):
raise ValueError("""Number of doc strings and xtags does not correspond""" )
if len(_a ) != len(_a ):
raise ValueError("""Number of doc strings and xsubs does not correspond""" )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def _a ( self : Any ,_a : Tuple ,_a : Optional[int] ):
'''simple docstring'''
A_ : str = """"""
for tagname, subs in zip(_a ,_a ):
xpath += f'/{tagname}'
if subs != 0:
xpath += f'[{subs}]'
return xpath
def __call__( self : Tuple ,_a : str ):
'''simple docstring'''
A_ : Optional[Any] = False
# Check that the strings have a valid type
if isinstance(_a ,_a ):
A_ : Any = True
elif isinstance(_a ,(list, tuple) ):
if len(_a ) == 0 or isinstance(html_strings[0] ,_a ):
A_ : Optional[int] = True
if not valid_strings:
raise ValueError(
"""HTML strings must of type `str`, `List[str]` (batch of examples), """
f'but is of type {type(_a )}.' )
A_ : List[Any] = bool(isinstance(_a ,(list, tuple) ) and (isinstance(html_strings[0] ,_a )) )
if not is_batched:
A_ : Optional[int] = [html_strings]
# Get nodes + xpaths
A_ : Optional[int] = []
A_ : Any = []
for html_string in html_strings:
A_ , A_ , A_ : List[Any] = self.get_three_from_single(_a )
nodes.append(_a )
A_ : Optional[Any] = []
for node, tag_list, sub_list in zip(_a ,_a ,_a ):
A_ : List[Any] = self.construct_xpath(_a ,_a )
xpath_strings.append(_a )
xpaths.append(_a )
# return as Dict
A_ : Optional[Any] = {"""nodes""": nodes, """xpaths""": xpaths}
A_ : str = BatchFeature(data=_a ,tensor_type=_a )
return encoded_inputs
| 665 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = [0] * len(lowerCamelCase)
A_ : Union[str, Any] = []
A_ : Union[str, Any] = []
A_ : Tuple = 0
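# Kahn's algorithm: compute in-degrees, then repeatedly process vertices whose
# in-degree has dropped to zero; a leftover vertex means the graph has a cycle.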
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase)):
if indegree[i] == 0:
queue.append(lowerCamelCase)
while queue:
A_ : Any = queue.pop(0)
cnt += 1
topo.append(lowerCamelCase)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase)
if cnt != len(lowerCamelCase):
print("""Cycle exists""")
else:
print(lowerCamelCase)
# Adjacency List of Graph
__magic_name__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 665 | 1 |
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any]=[]):
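# Build an alpha mask that ramps linearly from 0 at the borders to 255 in the center,
# then crop the ramp off any edge listed in remove_borders so it stays fully opaque.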
A_ : Any = size[0] - overlap_pixels * 2
A_ : Optional[Any] = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
A_ : List[str] = np.ones((size_y, size_x) , dtype=np.uinta) * 255
A_ : List[Any] = np.pad(lowerCamelCase , mode="""linear_ramp""" , pad_width=lowerCamelCase , end_values=0)
if "l" in remove_borders:
A_ : Optional[int] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
A_ : Optional[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
A_ : List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
A_ : Optional[int] = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : List[Any]):
return max(lowerCamelCase , min(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : [int] , lowerCamelCase : [int] , lowerCamelCase : [int]):
return (
clamp(rect[0] , min[0] , max[0]),
clamp(rect[1] , min[1] , max[1]),
clamp(rect[2] , min[0] , max[0]),
clamp(rect[3] , min[1] , max[1]),
)
def lowerCamelCase ( lowerCamelCase : [int] , lowerCamelCase : int , lowerCamelCase : [int]):
A_ : List[Any] = list(lowerCamelCase)
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
A_ : Any = clamp_rect(lowerCamelCase , [0, 0] , [image_size[0], image_size[1]])
return rect
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : List[Any] , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]):
A_ : Union[str, Any] = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]))
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1])) , (0, 0) , )
result.paste(lowerCamelCase , (original_slice, 0))
return result
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Tuple):
A_ : List[Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
A_ : List[Any] = tile.crop(lowerCamelCase)
return tile
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Optional[Any]):
A_ : Optional[int] = n % d
return n - divisor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : CLIPTextModel ,_a : CLIPTokenizer ,_a : UNetaDConditionModel ,_a : DDPMScheduler ,_a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,_a : int = 350 ,):
'''simple docstring'''
super().__init__(
vae=_a ,text_encoder=_a ,tokenizer=_a ,unet=_a ,low_res_scheduler=_a ,scheduler=_a ,max_noise_level=_a ,)
def _a ( self : Tuple ,_a : Union[str, Any] ,_a : Any ,_a : Optional[int] ,_a : int ,_a : int ,_a : Any ,_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
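# Expand the tile by the overlap border, prepend a strip of the source image for seam
# continuity, upscale, strip the extra context, then blend the result into the final
# canvas through a linear-ramp transparency mask.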
torch.manual_seed(0 )
A_ : int = (
min(image.size[0] - (tile_size + original_image_slice) ,x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) ,y * tile_size ),
min(image.size[0] ,(x + 1) * tile_size ),
min(image.size[1] ,(y + 1) * tile_size ),
)
A_ : Optional[int] = add_overlap_rect(_a ,_a ,image.size )
A_ : int = image.crop(_a )
A_ : int = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
A_ : List[Any] = translated_slice_x - (original_image_slice / 2)
A_ : int = max(0 ,_a )
A_ : List[Any] = squeeze_tile(_a ,_a ,_a ,_a )
A_ : Tuple = to_input.size
A_ : Optional[Any] = to_input.resize((tile_size, tile_size) ,Image.BICUBIC )
A_ : Any = super(_a ,self ).__call__(image=_a ,**_a ).images[0]
A_ : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) ,Image.BICUBIC )
A_ : Optional[Any] = unsqueeze_tile(_a ,_a )
A_ : List[str] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) ,Image.BICUBIC )
A_ : List[Any] = []
if x == 0:
remove_borders.append("""l""" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("""r""" )
if y == 0:
remove_borders.append("""t""" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("""b""" )
A_ : List[Any] = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) ,tile_border * 4 ,remove_borders=_a ) ,mode="""L""" ,)
final_image.paste(
_a ,(crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) ,_a )
@torch.no_grad()
def __call__( self : Optional[int] ,_a : Union[str, List[str]] ,_a : Union[PIL.Image.Image, List[PIL.Image.Image]] ,_a : int = 75 ,_a : float = 9.0 ,_a : int = 50 ,_a : Optional[Union[str, List[str]]] = None ,_a : Optional[int] = 1 ,_a : float = 0.0 ,_a : Optional[torch.Generator] = None ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,_a : int = 1 ,_a : int = 128 ,_a : int = 32 ,_a : int = 32 ,):
'''simple docstring'''
A_ : List[Any] = Image.new("""RGB""" ,(image.size[0] * 4, image.size[1] * 4) )
A_ : Any = math.ceil(image.size[0] / tile_size )
A_ : str = math.ceil(image.size[1] / tile_size )
A_ : Optional[int] = tcx * tcy
A_ : Union[str, Any] = 0
for y in range(_a ):
for x in range(_a ):
self._process_tile(
_a ,_a ,_a ,_a ,_a ,_a ,_a ,prompt=_a ,num_inference_steps=_a ,guidance_scale=_a ,noise_level=_a ,negative_prompt=_a ,num_images_per_prompt=_a ,eta=_a ,generator=_a ,latents=_a ,)
current_count += 1
if callback is not None:
callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
return final_image
def lowerCamelCase ( ):
# Run a demo
A_ : Union[str, Any] = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : Optional[int] = StableDiffusionTiledUpscalePipeline.from_pretrained(lowerCamelCase , revision="""fp16""" , torch_dtype=torch.floataa)
A_ : Optional[int] = pipe.to("""cuda""")
A_ : Dict = Image.open("""../../docs/source/imgs/diffusers_library.jpg""")
def callback(lowerCamelCase : List[str]):
print(F'progress: {obj["progress"]:.4f}')
obj["image"].save("""diffusers_library_progress.jpg""")
A_ : Optional[int] = pipe(image=lowerCamelCase , prompt="""Black font, white background, vector""" , noise_level=40 , callback=lowerCamelCase)
final_image.save("""diffusers_library.jpg""")
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append the new tokens to input_ids and extend the attention mask accordingly
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
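# A minimal, hedged sketch (not part of the test suite) of the RoPE scaling
# feature exercised above. All sizes are made-up toy values, and the
# LlamaConfig/LlamaModel imports are assumed to be available from transformers.
def _rope_scaling_sketch():
    from transformers import LlamaConfig, LlamaModel

    tiny_config = LlamaConfig(
        vocab_size=128,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64,
        max_position_embeddings=64,
        # Same dict shape as the test above: positions are divided by `factor`.
        rope_scaling={"type": "linear", "factor": 10.0},
    )
    return LlamaModel(tiny_config).eval()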
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip(
"""Logits are not exactly the same; once we fix the instabilities, we will update! Also, this is going to be a `too_slow` test.""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.float32 )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
__magic_name__ = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos , (list, tuple)) and isinstance(videos[0] , (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos , (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(F'Could not make batched video from {videos}')
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : int ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : str = size if size is not None else {"""shortest_edge""": 256}
A_ : Optional[int] = get_size_dict(_a ,default_to_square=_a )
A_ : Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Tuple = get_size_dict(_a ,param_name="""crop_size""" )
A_ : int = do_resize
A_ : Union[str, Any] = size
A_ : Optional[Any] = do_center_crop
A_ : Optional[int] = crop_size
A_ : List[Any] = resample
A_ : List[str] = do_rescale
A_ : Dict = rescale_factor
A_ : Optional[Any] = offset
A_ : Dict = do_normalize
A_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A_ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _a ( self : Any ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[Any] ,):
'''simple docstring'''
A_ : int = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" in size:
A_ : Optional[Any] = get_resize_output_image_size(_a ,size["""shortest_edge"""] ,default_to_square=_a )
elif "height" in size and "width" in size:
A_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[str] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Dict ,):
'''simple docstring'''
A_ : Any = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : str ,_a : np.ndarray ,_a : Union[int, float] ,_a : bool = True ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : List[Any] = image.astype(np.float32 )
if offset:
A_ : int = image - (scale / 2)
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Tuple ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[Any] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : Dict[str, int] = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,):
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
A_ : str = to_numpy_array(_a )
if do_resize:
A_ : Tuple = self.resize(image=_a ,size=_a ,resample=_a )
if do_center_crop:
A_ : Tuple = self.center_crop(_a ,size=_a )
if do_rescale:
A_ : Optional[Any] = self.rescale(image=_a ,scale=_a ,offset=_a )
if do_normalize:
A_ : List[str] = self.normalize(image=_a ,mean=_a ,std=_a )
A_ : List[str] = to_channel_dimension_format(_a ,_a )
return image
def _a ( self : Dict ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : Dict[str, int] = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : ChannelDimension = ChannelDimension.FIRST ,**_a : List[str] ,):
'''simple docstring'''
A_ : str = do_resize if do_resize is not None else self.do_resize
A_ : Optional[Any] = resample if resample is not None else self.resample
A_ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : int = do_rescale if do_rescale is not None else self.do_rescale
A_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : str = offset if offset is not None else self.offset
A_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
A_ : List[str] = image_mean if image_mean is not None else self.image_mean
A_ : List[Any] = image_std if image_std is not None else self.image_std
A_ : Optional[Any] = size if size is not None else self.size
A_ : Any = get_size_dict(_a ,default_to_square=_a )
A_ : str = crop_size if crop_size is not None else self.crop_size
A_ : List[Any] = get_size_dict(_a ,param_name="""crop_size""" )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
A_ : List[Any] = make_batched(_a )
A_ : int = [
[
self._preprocess_image(
image=_a ,do_resize=_a ,size=_a ,resample=_a ,do_center_crop=_a ,crop_size=_a ,do_rescale=_a ,rescale_factor=_a ,offset=_a ,do_normalize=_a ,image_mean=_a ,image_std=_a ,data_format=_a ,)
for img in video
]
for video in videos
]
A_ : List[Any] = {"""pixel_values""": videos}
return BatchFeature(data=_a ,tensor_type=_a )
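# Hedged usage sketch for the processor above. The class name is obfuscated in
# this file; `VivitImageProcessor` is an assumption about the matching
# transformers class (the `offset` rescale step suggests it).
def _video_processor_sketch():
    import numpy as np
    from transformers import VivitImageProcessor

    processor = VivitImageProcessor()
    # One video = a list of eight random HWC uint8 frames.
    video = [np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8) for _ in range(8)]
    batch = processor(video, return_tensors="np")
    return batch["pixel_values"].shape  # (1, 8, 3, 224, 224) with the defaults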
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTeX.\n references: list of references, one for each prediction. Each\n reference is a string that contains natural language\n and LaTeX.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _compute( self ,predictions ,references ):
'''simple docstring'''
n_correct = 0.0
for i, j in zip(predictions ,references ):
n_correct += 1.0 if math_equivalence.is_equiv(i ,j ) else 0.0
accuracy = n_correct / len(predictions )
return {
"accuracy": accuracy,
}
| 665 | 1 |
'''simple docstring'''
from manim import *
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = Rectangle(height=0.5 ,width=0.5 )
A_ : Optional[int] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
A_ : Optional[int] = Rectangle(height=0.25 ,width=0.25 )
A_ : Any = [mem.copy() for i in range(6 )]
A_ : List[str] = [mem.copy() for i in range(6 )]
A_ : str = VGroup(*_a ).arrange(_a ,buff=0 )
A_ : Optional[int] = VGroup(*_a ).arrange(_a ,buff=0 )
A_ : int = VGroup(_a ,_a ).arrange(_a ,buff=0 )
A_ : int = Text("""CPU""" ,font_size=24 )
A_ : Union[str, Any] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_a )
A_ : List[str] = [mem.copy() for i in range(4 )]
A_ : Optional[Any] = VGroup(*_a ).arrange(_a ,buff=0 )
A_ : List[Any] = Text("""GPU""" ,font_size=24 )
A_ : Union[str, Any] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
gpu.move_to([-1, -1, 0] )
self.add(_a )
A_ : Tuple = [mem.copy() for i in range(6 )]
A_ : Optional[int] = VGroup(*_a ).arrange(_a ,buff=0 )
A_ : Union[str, Any] = Text("""Model""" ,font_size=24 )
A_ : Dict = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
model.move_to([3, -1.0, 0] )
self.add(_a )
A_ : str = []
A_ : Union[str, Any] = []
for i, rect in enumerate(_a ):
A_ : Dict = fill.copy().set_fill(_a ,opacity=0.8 )
target.move_to(_a )
model_arr.append(_a )
A_ : List[Any] = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_a ,opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(_a )
self.add(*_a ,*_a )
A_ : Any = [meta_mem.copy() for i in range(6 )]
A_ : List[str] = [meta_mem.copy() for i in range(6 )]
A_ : Union[str, Any] = VGroup(*_a ).arrange(_a ,buff=0 )
A_ : Optional[int] = VGroup(*_a ).arrange(_a ,buff=0 )
A_ : str = VGroup(_a ,_a ).arrange(_a ,buff=0 )
A_ : List[Any] = Text("""Disk""" ,font_size=24 )
A_ : List[str] = Group(_a ,_a ).arrange(_a ,buff=0.5 ,aligned_edge=_a )
disk.move_to([-4, -1.25, 0] )
self.add(_a ,_a )
A_ : Any = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A_ : Optional[Any] = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
self.add(_a ,_a )
A_ : Union[str, Any] = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' ,font_size=18 ,)
blue_text.next_to(_a ,DOWN * 2.4 ,aligned_edge=key_text.get_left() )
self.add(_a )
A_ : Optional[int] = MarkupText(
f'Now watch as an input is passed through the model\nand how the memory is utilized and handled.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_a ) )
A_ : List[str] = Square(0.3 )
input.set_fill(_a ,opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] ,_a ,buff=0.5 )
self.play(Write(_a ) )
input.generate_target()
input.target.next_to(model_arr[0] ,direction=_a ,buff=0.02 )
self.play(MoveToTarget(_a ) )
self.play(FadeOut(_a ) )
A_ : Dict = Arrow(start=_a ,end=_a ,color=_a ,buff=0.5 )
a.next_to(model_arr[0].get_left() ,_a ,buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
A_ : Dict = MarkupText(
f'As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.' ,font_size=24 ,)
step_a.move_to([2, 2, 0] )
self.play(Write(_a ,run_time=3 ) )
A_ : Optional[int] = {"""run_time""": 1, """fade_in""": True, """fade_out""": True, """buff""": 0.02}
self.play(
Write(_a ) ,Circumscribe(model_arr[0] ,color=_a ,**_a ) ,Circumscribe(model_cpu_arr[0] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
self.play(MoveToTarget(model_cpu_arr[0] ) )
A_ : Any = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 ,_a ,buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
A_ : int = AnimationGroup(
FadeOut(_a ,run_time=0.5 ) ,MoveToTarget(_a ,run_time=0.5 ) ,FadeIn(_a ,run_time=0.5 ) ,lag_ratio=0.2 )
self.play(_a )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
A_ : List[Any] = 0.7
self.play(
Circumscribe(model_arr[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i] ,**_a ) ,Circumscribe(cpu_left_col_base[i + 1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,Circumscribe(model_arr[i + 1] ,color=_a ,**_a ) ,)
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) ,MoveToTarget(model_cpu_arr[i + 1] ) ,)
else:
self.play(
MoveToTarget(model_cpu_arr[i] ,run_time=0.7 ) ,MoveToTarget(model_cpu_arr[i + 1] ,run_time=0.7 ) ,)
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() ,RIGHT + 0.02 ,buff=0.2 )
self.play(
Circumscribe(model_arr[-1] ,color=_a ,**_a ) ,Circumscribe(cpu_left_col_base[-1] ,color=_a ,**_a ) ,Circumscribe(gpu_rect[0] ,color=_a ,**_a ) ,)
self.play(MoveToTarget(model_cpu_arr[i] ) )
A_ : Union[str, Any] = a_c
A_ : Any = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] ,RIGHT + 0.02 ,buff=0.5 )
self.play(
FadeOut(_a ) ,FadeOut(_a ,run_time=0.5 ) ,)
A_ : Any = MarkupText(f'Inference on a model too large for GPU memory\nis successfully completed.' ,font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_a ,run_time=3 ) ,MoveToTarget(_a ) )
self.wait()
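# Hedged sketch: scenes like the one above are usually rendered from the CLI
# (`manim -pql this_file.py <SceneName>`) or programmatically as below. The tiny
# scene is illustrative only and is not derived from the obfuscated scene above.
class _MinimalDemo(Scene):
    def construct(self):
        self.play(Create(Square()))


if __name__ == "__main__":
    with tempconfig({"quality": "low_quality", "preview": False}):
        _MinimalDemo().render()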
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
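# Hedged usage sketch: instantiating the config above and overriding one field.
# `RetriBertConfig` is the transformers class this file corresponds to; the
# top-level import path is an assumption about the installed version.
def _retribert_config_sketch():
    from transformers import RetriBertConfig

    cfg = RetriBertConfig(projection_dim=256)
    return cfg.model_type, cfg.projection_dim  # ("retribert", 256)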
| 665 | 1 |
'''simple docstring'''
def check_cycle(graph: dict) -> bool:
    # Keep track of all visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph)


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from the recursion stack before the function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
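# Quick examples for the helpers above (the graphs are illustrative): a
# directed triangle contains a back edge, a simple chain does not.
assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
assert check_cycle({0: [1], 1: [2], 2: []}) is False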
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
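# Hedged usage sketch; the obfuscated class above matches transformers'
# BigBirdTokenizer (same vocab map and [CLS]/[SEP] handling), and hub access is
# assumed.
def _bigbird_tokenizer_sketch():
    from transformers import BigBirdTokenizer

    tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    enc = tok("Paris is the capital of France.")
    # build_inputs_with_special_tokens above frames the ids as [CLS] ... [SEP].
    return tok.decode(enc["input_ids"])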
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
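# Hedged sketch of what the lazy structure above buys: importing the package is
# cheap, and the heavy modeling modules load only when first accessed.
def _lazy_import_sketch():
    from transformers.models.xlnet import XLNetConfig  # pulls in only the config module

    return XLNetConfig(n_layer=2).model_type  # "xlnet"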
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
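# Hedged sketch of the pipeline API exercised above, reusing the same tiny
# checkpoint (hub access assumed); do_sample=False keeps the output
# deterministic, as in the tests.
def _text2text_sketch():
    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    return generator("Something there", do_sample=False)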
| 665 | 1 |
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend("""aer_simulator""")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
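# Hedged variant: `qiskit.Aer` and `qiskit.execute` were removed in Qiskit 1.0.
# Assuming the separate qiskit-aer package is installed, an equivalent sketch:
def single_qubit_measure_aer(qubits: int, classical_bits: int):
    from qiskit_aer import AerSimulator

    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    circuit.measure([0], [0])
    job = AerSimulator().run(circuit, shots=1000)
    return job.result().get_counts(circuit)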
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
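# Hedged sketch: the attribute_map above aliases GPT-2 style names, so the
# config can be built with `n_*` arguments and read back through the canonical
# names. `GPTBigCodeConfig` is the transformers class this file corresponds to.
def _gpt_bigcode_config_sketch():
    from transformers import GPTBigCodeConfig

    cfg = GPTBigCodeConfig(n_embd=64, n_layer=2, n_head=4, multi_query=True)
    return cfg.hidden_size, cfg.num_hidden_layers, cfg.num_attention_heads  # (64, 2, 4)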
| 665 | 1 |
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("""graph is not Eulerian""")
        print("""no path""")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("""graph has a Euler path""")
    if check == 1:
        print("""graph has a Euler cycle""")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs , cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
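# Quick checks that follow directly from the helpers above: the byte encoder
# covers all 256 byte values, and get_pairs enumerates the adjacent symbol
# pairs that BPE merges are ranked over.
assert len(bytes_to_unicode()) == 256
assert get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}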
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
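# Hedged usage sketch: byte-level BPE round-trips arbitrary text. The class
# above corresponds to transformers' LongformerTokenizer; hub access is assumed.
def _longformer_roundtrip_sketch():
    from transformers import LongformerTokenizer

    tok = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
    ids = tok("Hello world!")["input_ids"]
    return tok.decode(ids, skip_special_tokens=True)  # "Hello world!"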
| 665 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Tuple):
# Load configuration defined in the metadata file
with open(lowerCamelCase) as metadata_file:
A_ : Tuple = json.load(lowerCamelCase)
A_ : List[Any] = LukeConfig(use_entity_aware_attention=lowerCamelCase , **metadata["""model_config"""])
# Load in the weights from the checkpoint_path
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")["""module"""]
# Load the entity vocab file
A_ : Any = load_original_entity_vocab(lowerCamelCase)
# add an entry for [MASK2]
A_ : int = max(entity_vocab.values()) + 1
config.entity_vocab_size += 1
A_ : int = XLMRobertaTokenizer.from_pretrained(metadata["""model_config"""]["""bert_model_name"""])
# Add special tokens to the token vocabulary for downstream tasks
A_ : str = AddedToken("""<ent>""" , lstrip=lowerCamelCase , rstrip=lowerCamelCase)
A_ : List[str] = AddedToken("""<ent2>""" , lstrip=lowerCamelCase , rstrip=lowerCamelCase)
tokenizer.add_special_tokens({"""additional_special_tokens""": [entity_token_a, entity_token_a]})
config.vocab_size += 2
print(F'Saving tokenizer to {pytorch_dump_folder_path}')
tokenizer.save_pretrained(lowerCamelCase)
with open(os.path.join(lowerCamelCase , """tokenizer_config.json""") , """r""") as f:
A_ : Dict = json.load(lowerCamelCase)
A_ : Optional[Any] = """MLukeTokenizer"""
with open(os.path.join(lowerCamelCase , """tokenizer_config.json""") , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase)
with open(os.path.join(lowerCamelCase , MLukeTokenizer.vocab_files_names["""entity_vocab_file"""]) , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase)
A_ : Dict = MLukeTokenizer.from_pretrained(lowerCamelCase)
# Initialize the embeddings of the special tokens
A_ : List[str] = tokenizer.convert_tokens_to_ids(["""@"""])[0]
A_ : int = tokenizer.convert_tokens_to_ids(["""#"""])[0]
A_ : Tuple = state_dict["""embeddings.word_embeddings.weight"""]
A_ : Dict = word_emb[ent_init_index].unsqueeze(0)
A_ : str = word_emb[enta_init_index].unsqueeze(0)
A_ : List[str] = torch.cat([word_emb, ent_emb, enta_emb])
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A_ : List[str] = state_dict[bias_name]
A_ : int = decoder_bias[ent_init_index].unsqueeze(0)
A_ : str = decoder_bias[enta_init_index].unsqueeze(0)
A_ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias])
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers):
for matrix_name in ["query.weight", "query.bias"]:
A_ : str = F'encoder.layer.{layer_index}.attention.self.'
A_ : str = state_dict[prefix + matrix_name]
A_ : Any = state_dict[prefix + matrix_name]
A_ : Union[str, Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A_ : Optional[int] = state_dict["""entity_embeddings.entity_embeddings.weight"""]
A_ : List[str] = entity_emb[entity_vocab["""[MASK]"""]].unsqueeze(0)
A_ : List[str] = torch.cat([entity_emb, entity_mask_emb])
# add [MASK2] for 'entity_predictions.bias'
A_ : int = state_dict["""entity_predictions.bias"""]
A_ : Tuple = entity_prediction_bias[entity_vocab["""[MASK]"""]].unsqueeze(0)
A_ : int = torch.cat([entity_prediction_bias, entity_mask_bias])
A_ : Any = LukeForMaskedLM(config=lowerCamelCase).eval()
state_dict.pop("""entity_predictions.decoder.weight""")
state_dict.pop("""lm_head.decoder.weight""")
state_dict.pop("""lm_head.decoder.bias""")
A_ : Union[str, Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("""lm_head""") or key.startswith("""entity_predictions""")):
A_ : int = state_dict[key]
else:
A_ : List[str] = state_dict[key]
A_ , A_ : Tuple = model.load_state_dict(lowerCamelCase , strict=lowerCamelCase)
if set(lowerCamelCase) != {"luke.embeddings.position_ids"}:
raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}')
if set(lowerCamelCase) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F'Unexpected missing_keys: {missing_keys}')
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A_ : List[str] = MLukeTokenizer.from_pretrained(lowerCamelCase , task="""entity_classification""")
A_ : Any = """ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."""
A_ : List[str] = (0, 9)
A_ : Any = tokenizer(lowerCamelCase , entity_spans=[span] , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ : Dict = torch.Size((1, 33, 768))
A_ : Dict = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}')
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A_ : Dict = torch.Size((1, 1, 768))
A_ : int = torch.tensor([[-0.1482, 0.0609, 0.0322]])
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
F' {expected_shape}')
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowerCamelCase , atol=1E-4):
raise ValueError
# Verify masked word/entity prediction
A_ : Tuple = MLukeTokenizer.from_pretrained(lowerCamelCase)
A_ : List[str] = """Tokyo is the capital of <mask>."""
A_ : Optional[int] = (24, 30)
A_ : Optional[int] = tokenizer(lowerCamelCase , entity_spans=[span] , return_tensors="""pt""")
A_ : Dict = model(**lowerCamelCase)
A_ : Tuple = encoding["""input_ids"""][0].tolist()
A_ : List[str] = input_ids.index(tokenizer.convert_tokens_to_ids("""<mask>"""))
A_ : Tuple = outputs.logits[0][mask_position_id].argmax(dim=-1)
assert "Japan" == tokenizer.decode(lowerCamelCase)
A_ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
A_ : Dict = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("""en:""")][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("""Saving PyTorch model to {}""".format(lowerCamelCase))
model.save_pretrained(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : str):
A_ : str = ["""[MASK]""", """[PAD]""", """[UNK]"""]
    A_ : int = [json.loads(line) for line in open(lowerCamelCase)]
A_ : Union[str, Any] = {}
for entry in data:
A_ : Tuple = entry["""id"""]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A_ : Optional[int] = entity_id
break
A_ : Any = F'{language}:{entity_name}'
A_ : List[Any] = entity_id
return new_mapping
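# A minimal readable sketch of the loader above (a reconstruction: the assignment targets were
# replaced by placeholders, so the names here are inferred from the visible accesses). It assumes
# each line of the entity vocab file is a JSON object like {"id": 17, "entities": [["Japan", "en"]]}.
def load_original_entity_vocab_sketch(entity_vocab_path: str) -> dict:
    special_tokens = ["[MASK]", "[PAD]", "[UNK]"]
    new_mapping = {}
    with open(entity_vocab_path) as f:
        for line in f:
            entry = json.loads(line)
            entity_id = entry["id"]
            for entity_name, language in entry["entities"]:
                if entity_name in special_tokens:
                    # special tokens are stored without a language prefix
                    new_mapping[entity_name] = entity_id
                    break
                new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping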
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
        help='Path to an entity_vocab.jsonl file (one JSON object per line), containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
__magic_name__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 665 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
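# In plainer terms (a reconstruction, since the local names above were replaced by placeholders):
# the function maps every byte 0..255 to a printable unicode character, keeping already-printable
# bytes as themselves and shifting the remaining ones past code point 255; in the upstream GPT-2
# byte encoder this is why the space byte renders as "Ġ" (chr(256 + 32)).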
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
return pairs
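# Illustrative behaviour of the pair extractor above: for the symbol tuple ("l", "o", "w") it
# returns {("l", "o"), ("o", "w")}, i.e. the set of adjacent symbol bigrams used to rank merges.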
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
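    # The loop above is the standard byte-pair-encoding merge: repeatedly fuse the adjacent pair
    # with the lowest merge rank until no ranked pair remains, then return the space-joined result
    # (e.g. "lower" could become "low er" under a hypothetical rank table that merges "l"+"o" and
    # then "lo"+"w" first). Upstream the result is also memoised in self.cache, which the
    # placeholder renaming here obscures.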
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
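# Illustrative effect of the lazy module set up above: importing the package stays cheap because
# the torch-backed classes listed in _import_structure are only materialised on first attribute
# access, e.g. `from <package> import FocalNetModel` triggers the real import of modeling_focalnet
# at that moment. Upstream, this _LazyModule instance is installed as sys.modules[__name__]; the
# placeholder `__magic_name__` stands in for that assignment target.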
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Disabled by default; pass this flag to enable them.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__magic_name__ = TypeVar('T')
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
a_ = 42 # Cache store of keys
a_ = 42 # References of the keys in cache
a_ = 10 # Maximum capacity of cache
def __init__( self : Optional[int] ,_a : int ):
'''simple docstring'''
A_ : Optional[Any] = deque()
A_ : List[str] = set()
if not n:
A_ : List[str] = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
A_ : Tuple = n
def _a ( self : int ,_a : T ):
'''simple docstring'''
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
A_ : Dict = self.dq_store.pop()
self.key_reference.remove(_a )
else:
self.dq_store.remove(_a )
self.dq_store.appendleft(_a )
self.key_reference.add(_a )
def _a ( self : List[Any] ):
'''simple docstring'''
for k in self.dq_store:
print(_a )
def __repr__( self : Optional[int] ):
'''simple docstring'''
return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
__magic_name__ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
            else (Image.fromarray(_ ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
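# Context (an inference from the recurrence above, not stated in this file): the loop walks a
# Pell-style recurrence that generates the "almost equilateral" triangles (sides n, n, n±1 with
# integral sides and area) of Project Euler problem 94 and sums their perimeters up to the limit.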
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
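# Typical launch commands for the script above (flags shown are illustrative; `accelerate launch`
# is the standard entry point for such scripts):
#   accelerate launch this_script.py --mixed_precision fp16
#   accelerate launch --num_processes 2 this_script.py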
| 665 | 1 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : str = None ,_a : uuid.UUID = None ,_a : List[str]=None ,_a : Tuple=None ):
'''simple docstring'''
if not conversation_id:
A_ : Union[str, Any] = uuid.uuida()
if past_user_inputs is None:
A_ : str = []
if generated_responses is None:
A_ : List[Any] = []
A_ : uuid.UUID = conversation_id
A_ : List[str] = past_user_inputs
A_ : List[str] = generated_responses
A_ : Optional[str] = text
def __eq__( self : Dict ,_a : str ):
'''simple docstring'''
if not isinstance(_a ,_a ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def _a ( self : Optional[Any] ,_a : str ,_a : bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".' )
A_ : Any = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' )
else:
A_ : List[str] = text
def _a ( self : str ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
A_ : Dict = None
def _a ( self : str ,_a : str ):
'''simple docstring'''
self.generated_responses.append(_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : List[str] ):
'''simple docstring'''
A_ : List[str] = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
A_ : Any = """user""" if is_user else """bot"""
output += f'{name} >> {text} \n'
return output
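# Minimal usage sketch for the container above (the checkpoint name is an illustrative choice,
# not mandated by this file):
#   from transformers import pipeline
#   chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")
#   conversation = Conversation("Can you recommend a movie?")
#   conversation = chatbot(conversation)
#   print(conversation.generated_responses[-1])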
@add_end_docstrings(
__SCREAMING_SNAKE_CASE , r"""
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
""" , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] ,*_a : Optional[Any] ,**_a : Union[str, Any] ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if self.tokenizer.pad_token_id is None:
A_ : List[str] = self.tokenizer.eos_token
def _a ( self : Dict ,_a : Dict=None ,_a : Tuple=None ,_a : Tuple=None ,**_a : Any ):
'''simple docstring'''
A_ : Tuple = {}
A_ : Tuple = {}
A_ : Dict = {}
if min_length_for_response is not None:
A_ : str = min_length_for_response
if minimum_tokens is not None:
A_ : List[Any] = minimum_tokens
if "max_length" in generate_kwargs:
A_ : Tuple = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
A_ : str = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_a )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any ,_a : Union[Conversation, List[Conversation]] ,_a : Dict=0 ,**_a : List[Any] ):
'''simple docstring'''
A_ : int = super().__call__(_a ,num_workers=_a ,**_a )
if isinstance(_a ,_a ) and len(_a ) == 1:
return outputs[0]
return outputs
def _a ( self : str ,_a : Conversation ,_a : str=32 ):
'''simple docstring'''
if not isinstance(_a ,_a ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
A_ : str = self.tokenizer._build_conversation_input_ids(_a )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
A_ : int = self._legacy_parse_and_tokenize(_a )
if self.framework == "pt":
A_ : Tuple = torch.LongTensor([input_ids] )
elif self.framework == "tf":
A_ : Optional[Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def _a ( self : int ,_a : str ,_a : Union[str, Any]=10 ,**_a : int ):
'''simple docstring'''
A_ : int = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
A_ : List[Any] = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})' )
A_ : Dict = max_length - minimum_tokens
A_ : Optional[int] = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
A_ : Dict = model_inputs["""attention_mask"""][:, -trim:]
A_ : Union[str, Any] = model_inputs.pop("""conversation""" )
A_ : Optional[Any] = max_length
A_ : Dict = self.model.generate(**_a ,**_a )
if self.model.config.is_encoder_decoder:
A_ : List[str] = 1
else:
A_ : Any = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def _a ( self : Union[str, Any] ,_a : Tuple ,_a : Tuple=True ):
'''simple docstring'''
A_ : Any = model_outputs["""output_ids"""]
A_ : Optional[int] = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=_a ,clean_up_tokenization_spaces=_a ,)
A_ : Optional[int] = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(_a )
return conversation
def _a ( self : Dict ,_a : Conversation ):
'''simple docstring'''
A_ : str = self.tokenizer.eos_token_id
A_ : int = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_a ,add_special_tokens=_a ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_a ,add_special_tokens=_a ) )
if len(_a ) > self.tokenizer.model_max_length:
A_ : List[Any] = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 665 |
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]):
# Validation
if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(lowerCamelCase) == 0:
return 0
if min(lowerCamelCase) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(lowerCamelCase) >= 366:
raise ValueError("""All days elements should be less than 366""")
A_ : Tuple = set(lowerCamelCase)
@functools.cache
def dynamic_programming(lowerCamelCase : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
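# Illustrative call (this mirrors the classic "minimum cost for tickets" setup; the expected
# value is hand-computed, not a doctest from this file): with days=[1, 4, 6, 7, 8, 20] and
# costs=[2, 7, 15], the minimum spend is 11 (a 7-day pass covering days 1-7, then two 1-day passes).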
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : str ,_a : Any ):
'''simple docstring'''
A_ : Any = data
A_ : Node | None = None
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any ):
'''simple docstring'''
A_ : List[Any] = None
A_ : List[str] = None
def __iter__( self : Any ):
'''simple docstring'''
A_ : Tuple = self.head
while self.head:
yield node.data
A_ : Optional[int] = node.next
if node == self.head:
break
def __len__( self : Any ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : Optional[Any] ):
'''simple docstring'''
return "->".join(str(_a ) for item in iter(self ) )
def _a ( self : List[Any] ,_a : Any ):
'''simple docstring'''
self.insert_nth(len(self ) ,_a )
def _a ( self : Dict ,_a : Any ):
'''simple docstring'''
self.insert_nth(0 ,_a )
def _a ( self : Tuple ,_a : int ,_a : Any ):
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError("""list index out of range.""" )
A_ : Union[str, Any] = Node(_a )
if self.head is None:
A_ : str = new_node # first node points itself
A_ : str = new_node
elif index == 0: # insert at head
A_ : Dict = self.head
A_ : int = new_node
else:
A_ : str = self.head
for _ in range(index - 1 ):
A_ : Tuple = temp.next
A_ : Any = temp.next
A_ : List[str] = new_node
if index == len(self ) - 1: # insert at tail
A_ : List[Any] = new_node
def _a ( self : Optional[Any] ):
'''simple docstring'''
return self.delete_nth(0 )
def _a ( self : List[Any] ):
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def _a ( self : Union[str, Any] ,_a : int = 0 ):
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError("""list index out of range.""" )
A_ : int = self.head
if self.head == self.tail: # just one node
A_ : str = None
elif index == 0: # delete head node
A_ : List[Any] = self.tail.next.next
A_ : List[str] = self.head.next
else:
A_ : Any = self.head
for _ in range(index - 1 ):
A_ : Union[str, Any] = temp.next
A_ : str = temp.next
A_ : Dict = temp.next.next
if index == len(self ) - 1: # delete at tail
A_ : Optional[Any] = temp
return delete_node.data
def _a ( self : int ):
'''simple docstring'''
return len(self ) == 0
def lowerCamelCase ( ):
A_ : Dict = CircularLinkedList()
assert len(lowerCamelCase) == 0
assert circular_linked_list.is_empty() is True
assert str(lowerCamelCase) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1)
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0)
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5):
assert len(lowerCamelCase) == i
circular_linked_list.insert_nth(lowerCamelCase , i + 1)
assert str(lowerCamelCase) == "->".join(str(lowerCamelCase) for i in range(1 , 6))
circular_linked_list.insert_tail(6)
assert str(lowerCamelCase) == "->".join(str(lowerCamelCase) for i in range(1 , 7))
circular_linked_list.insert_head(0)
assert str(lowerCamelCase) == "->".join(str(lowerCamelCase) for i in range(0 , 7))
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowerCamelCase) == "->".join(str(lowerCamelCase) for i in range(1 , 6))
assert circular_linked_list.delete_nth(2) == 3
circular_linked_list.insert_nth(2 , 3)
assert str(lowerCamelCase) == "->".join(str(lowerCamelCase) for i in range(1 , 6))
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ):
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if colsa != 1:
A_ : Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if rowsa != rowsa:
A_ : Dict = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(lowerCamelCase)
if len(lowerCamelCase) != rowsa:
A_ : Union[str, Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F'matrix but received {len(lowerCamelCase)} and {rowsa}'
)
raise ValueError(lowerCamelCase)
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""")
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
A_ , A_ : int = table.shape
strictly_diagonally_dominant(lowerCamelCase)
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase):
A_ : List[Any] = []
for row in range(lowerCamelCase):
A_ : int = 0
for col in range(lowerCamelCase):
if col == row:
A_ : List[str] = table[row][col]
elif col == cols - 1:
A_ : str = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : Union[str, Any] = (temp + val) / denom
new_val.append(lowerCamelCase)
A_ : Tuple = new_val
return [float(lowerCamelCase) for i in new_val]
def lowerCamelCase ( lowerCamelCase : NDArray[floataa]):
A_ , A_ : Dict = table.shape
A_ : Union[str, Any] = True
for i in range(0 , lowerCamelCase):
A_ : str = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
return is_diagonally_dominant
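# Hedged usage sketch for the Jacobi solver above (the matrix is strictly diagonally dominant,
# so the check passes; exact iterate values are intentionally not asserted here):
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   x = lowerCamelCase(coefficient, constant, [0.5, -0.5, -0.5], 3)  # three Jacobi sweeps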
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
__magic_name__ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = SpeechTaTokenizer
a_ = False
a_ = True
def _a ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
A_ : int = SpeechTaTokenizer(_a )
A_ : Dict = AddedToken("""<mask>""" ,lstrip=_a ,rstrip=_a )
A_ : Tuple = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Tuple ,_a : Any ):
'''simple docstring'''
A_ : Any = """this is a test"""
A_ : Dict = """this is a test"""
return input_text, output_text
def _a ( self : Optional[int] ,_a : List[Any] ,_a : Union[str, Any]=False ,_a : List[str]=20 ,_a : str=5 ):
'''simple docstring'''
A_ , A_ : Optional[int] = self.get_input_output_texts(_a )
A_ : str = tokenizer.encode(_a ,add_special_tokens=_a )
A_ : Dict = tokenizer.decode(_a ,clean_up_tokenization_spaces=_a )
return text, ids
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = """<pad>"""
A_ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) ,_a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) ,_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<s>""" )
self.assertEqual(vocab_keys[1] ,"""<pad>""" )
self.assertEqual(vocab_keys[-4] ,"""œ""" )
self.assertEqual(vocab_keys[-2] ,"""<mask>""" )
self.assertEqual(vocab_keys[-1] ,"""<ctc_blank>""" )
self.assertEqual(len(_a ) ,81 )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,79 )
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[str] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
A_ : int = tokenizer.vocab_size
A_ : Tuple = len(_a )
self.assertNotEqual(_a ,0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
A_ : str = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
A_ : Optional[int] = tokenizer.add_tokens(_a )
A_ : Dict = tokenizer.vocab_size
A_ : List[str] = len(_a )
self.assertNotEqual(_a ,0 )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,len(_a ) )
self.assertEqual(_a ,all_size + len(_a ) )
A_ : Union[str, Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" ,add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) ,4 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
A_ : Dict = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
A_ : List[Any] = tokenizer.add_special_tokens(_a )
A_ : int = tokenizer.vocab_size
A_ : Tuple = len(_a )
self.assertNotEqual(_a ,0 )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,len(_a ) )
self.assertEqual(_a ,all_size_a + len(_a ) )
A_ : List[str] = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" ,add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) ,6 )
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] ,tokens[1] )
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] ,tokens[-4] )
self.assertEqual(tokens[0] ,tokenizer.eos_token_id )
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id )
def _a ( self : str ):
'''simple docstring'''
pass
def _a ( self : Dict ):
'''simple docstring'''
pass
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = self.get_tokenizer()
A_ : List[str] = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(_a ,[SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) ,[4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] ,)
A_ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_a ,[SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
A_ : Optional[Any] = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a ,[4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
A_ : List[Any] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a ,[SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
A_ : Optional[Any] = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a ,model_name="""microsoft/speecht5_asr""" ,revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" ,sequences=_a ,)
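# Hedged usage sketch of the character-level behavior the tests above assert.
# SpeechTaTokenizer is the obfuscated SpeechT5Tokenizer; SAMPLE_VOCAB stands
# for the fixture path loaded at the top of the snippet (obfuscated to
# __magic_name__).
from transformers import SpeechT5Tokenizer
tok = SpeechT5Tokenizer(SAMPLE_VOCAB)
print(tok.tokenize("This is a test"))  # one piece per character, '▁' marking word starts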
| 665 |
'''simple docstring'''
def lowerCamelCase ( a : str , b : str):
    # Abbreviation matching: dp[i][j] is True when the first i characters of
    # `a` can be turned into the first j characters of `b` by upper-casing
    # some lowercase letters and deleting the remaining lowercase ones.
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
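# Quick check of the DP above, called by its obfuscated name (the classic
# "abbreviation" examples):
print(lowerCamelCase("daBcd", "ABC"))  # True: capitalize a and c, drop both d's
print(lowerCamelCase("dBcd", "ABC"))   # False: nothing can become the leading 'A'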
| 665 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : List[Any]):
A_ : str = torch.load(lowerCamelCase , map_location="""cpu""")
if "model" in sd.keys():
A_ : Dict = torch.load(lowerCamelCase , map_location="""cpu""")["""model"""]
# pop unnecessary weights
A_ : str = [
"""decoder.version""",
"""decoder.output_projection.weight""",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowerCamelCase)
A_ : List[Any] = {
"""decoder.project_in_dim.weight""": """decoder.project_in.weight""",
"""decoder.project_out_dim.weight""": """decoder.project_out.weight""",
"""decoder.layer_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.layer_norm.bias""": """decoder.final_layer_norm.bias""",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
A_ : Optional[Any] = sd.pop(lowerCamelCase)
A_ : Dict = list(sd.keys())
for key in keys:
if ".qkv_proj." in key:
A_ : int = sd[key]
# We split the fused QKV projection into separate Q, K and V projections
A_ : int = key.replace(""".qkv_proj.""" , """.q_proj.""")
A_ : List[str] = key.replace(""".qkv_proj.""" , """.k_proj.""")
A_ : List[Any] = key.replace(""".qkv_proj.""" , """.v_proj.""")
A_ : int = value.shape[0]
assert depth % 3 == 0
# `SequenceParallelTransformerBlock` stores the fused QKV weight in K,V,Q order despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
A_ , A_ , A_ : Optional[int] = torch.split(lowerCamelCase , depth // 3 , dim=0)
A_ : Any = q
A_ : Tuple = k
A_ : str = v
del sd[key]
return sd
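# Tiny illustration (toy shapes, not from the original script) of the split
# performed above: a fused qkv_proj weight of shape (3*d, d) yields three
# (d, d) blocks, read back in K, V, Q order per the metaseq layout.
import torch
fused = torch.arange(12.0).reshape(6, 2)      # d = 2, so depth = 6
k, v, q = torch.split(fused, 6 // 3, dim=0)
print(k.shape, v.shape, q.shape)              # three torch.Size([2, 2]) blocks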
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict=None):
A_ : List[str] = load_checkpoint(lowerCamelCase)
if config is not None:
A_ : Union[str, Any] = OPTConfig.from_pretrained(lowerCamelCase)
else:
A_ : List[Any] = OPTConfig()
A_ : Optional[Any] = OPTModel(lowerCamelCase).half().eval()
model.load_state_dict(lowerCamelCase)
    # Save the converted model
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
model.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fairseq_path',
type=str,
help=(
'path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'
' https://huggingface.co/models?other=opt_metasq'
),
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--hf_config', default=None, type=str, help='Define HF config.')
__magic_name__ = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
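# Hypothetical invocation (script name and paths are placeholders):
#   python convert_opt_checkpoint.py --fairseq_path ./opt-125m/restored.pt \
#       --pytorch_dump_folder_path ./opt-125m-hf --hf_config facebook/opt-125m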
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    # A directed edge used by the 0-1 BFS graph below.
    destination_vertex: int
    weight: int
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
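# Hedged usage sketch, assuming de-obfuscated names AdjacencyList, add_edge
# and get_shortest_path for the graph class and its `_a` methods above:
g = AdjacencyList(5)
g.add_edge(0, 1, 0)   # 0-weight edges are expanded first via appendleft
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 4, 1)
print(g.get_shortest_path(0, 4))  # 2, via 0 -> 3 -> 4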
| 665 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    # 2-color the graph by DFS; it is bipartite iff no edge ends up
    # connecting two vertices of the same color.
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
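# The sample graph above is a 4-cycle (0-1-2-3) plus an isolated vertex,
# so this prints True.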
| 665 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    # Project Euler 94: sum the perimeters of all almost equilateral
    # triangles (sides a, a, a +/- 1) with integral side lengths and area
    # whose perimeter does not exceed max_perimeter.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
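# Sanity check by hand-tracing the loop: the smallest almost equilateral
# triangle with integral area is (5, 5, 6), perimeter 16, and solution(16)
# returns 16.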
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : Optional[int] ,_a : List[Any]=3 ,_a : Tuple=32 ,_a : int=3 ,_a : List[Any]=10 ,_a : Optional[Any]=[10, 20, 30, 40] ,_a : int=[1, 1, 2, 1] ,_a : Any=True ,_a : Tuple=True ,_a : int="relu" ,_a : Optional[Any]=3 ,_a : List[Any]=None ,):
'''simple docstring'''
A_ : Union[str, Any] = parent
A_ : List[Any] = batch_size
A_ : int = image_size
A_ : Dict = num_channels
A_ : List[str] = embeddings_size
A_ : List[Any] = hidden_sizes
A_ : str = depths
A_ : Dict = is_training
A_ : List[str] = use_labels
A_ : Union[str, Any] = hidden_act
A_ : List[str] = num_labels
A_ : Dict = scope
A_ : int = len(_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] ,self.num_labels )
A_ : Optional[int] = self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,)
def _a ( self : Any ,_a : List[str] ,_a : List[str] ,_a : int ):
'''simple docstring'''
A_ : Dict = TFRegNetModel(config=_a )
A_ : List[Any] = model(_a ,training=_a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _a ( self : Dict ,_a : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = self.num_labels
A_ : Optional[Any] = TFRegNetForImageClassification(_a )
A_ : Tuple = model(_a ,labels=_a ,training=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : str ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ : Union[str, Any] = config_and_inputs
A_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = TFRegNetModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def _a ( self : Any ):
'''simple docstring'''
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def _a ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 ,reason="""TF does not support backprop for grouped convolutions on CPU.""" ,)
@slow
def _a ( self : Tuple ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def _a ( self : Dict ):
'''simple docstring'''
pass
def _a ( self : Tuple ):
'''simple docstring'''
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_a )
A_ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Optional[Any] = [*signature.parameters.keys()]
A_ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Dict ):
'''simple docstring'''
def check_hidden_states_output(_a : Optional[Any] ,_a : Any ,_a : int ):
A_ : Union[str, Any] = model_class(_a )
A_ : Optional[int] = model(**self._prepare_for_class(_a ,_a ) ,training=_a )
A_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A_ : Optional[Any] = self.model_tester.num_stages
self.assertEqual(len(_a ) ,expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 2, self.model_tester.image_size // 2] ,)
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A_ : List[Any] = layer_type
A_ : List[str] = True
check_hidden_states_output(_a ,_a ,_a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : Dict = True
check_hidden_states_output(_a ,_a ,_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(_a : Optional[Any] ,_a : str ,_a : Any ,_a : Dict={} ):
A_ : Dict = model(_a ,return_dict=_a ,**_a )
A_ : int = model(_a ,return_dict=_a ,**_a ).to_tuple()
def recursive_check(_a : List[str] ,_a : Optional[int] ):
if isinstance(_a ,(List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_a ,_a ):
recursive_check(_a ,_a )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(_a ,_a ) ) ,msg=(
"""Tuple and dict output are not equal. Difference:"""
f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) ,)
recursive_check(_a ,_a )
for model_class in self.all_model_classes:
A_ : Dict = model_class(_a )
A_ : str = self._prepare_for_class(_a ,_a )
A_ : Optional[int] = self._prepare_for_class(_a ,_a )
check_equivalence(_a ,_a ,_a )
A_ : Any = self._prepare_for_class(_a ,_a ,return_labels=_a )
A_ : str = self._prepare_for_class(_a ,_a ,return_labels=_a )
check_equivalence(_a ,_a ,_a )
A_ : Tuple = self._prepare_for_class(_a ,_a )
A_ : Tuple = self._prepare_for_class(_a ,_a )
check_equivalence(_a ,_a ,_a ,{"""output_hidden_states""": True} )
A_ : Optional[Any] = self._prepare_for_class(_a ,_a ,return_labels=_a )
A_ : List[Any] = self._prepare_for_class(_a ,_a ,return_labels=_a )
check_equivalence(_a ,_a ,_a ,{"""output_hidden_states""": True} )
def _a ( self : str ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = TFRegNetModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase ( ):
A_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self : Optional[int] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
A_ : int = self.default_image_processor
A_ : Any = prepare_img()
A_ : List[str] = image_processor(images=_a ,return_tensors="""tf""" )
# forward pass
A_ : Optional[Any] = model(**_a ,training=_a )
# verify the logits
A_ : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,_a )
A_ : Any = tf.constant([-0.4180, -1.5051, -3.4836] )
tf.debugging.assert_near(outputs.logits[0, :3] ,_a ,atol=1e-4 )
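# Hedged inference sketch mirroring the integration test above. The
# checkpoint name is an assumption (the test reads it from
# TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]); the image path matches the
# fixture used by prepare_img().
from PIL import Image
from transformers import AutoImageProcessor, TFRegNetForImageClassification
processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs, training=False).logits   # shape (1, 1000)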
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
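# With the subcommands registered above, typical invocations look like:
#   accelerate config
#   accelerate env
#   accelerate launch train.py
#   accelerate test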
| 665 | 1 |
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict):
# Initialise PyTorch model
A_ : Union[str, Any] = AlbertConfig.from_json_file(lowerCamelCase)
print(F'Building PyTorch model from configuration: {config}')
A_ : str = AlbertForPreTraining(lowerCamelCase)
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}')
torch.save(model.state_dict() , lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
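# Hypothetical invocation (paths are placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin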
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
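# Note: _LazyModule defers the torch-dependent imports above until first
# attribute access, so `from transformers.models.altclip import AltCLIPModel`
# only pays the import cost when the name is actually used.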
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Dict=None , lowerCamelCase : List[Any]=None):
if attention_mask is None:
A_ : Tuple = tf.cast(tf.math.not_equal(lowerCamelCase , config.pad_token_id) , tf.inta)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __lowerCAmelCase :
'''simple docstring'''
a_ = OPTConfig
a_ = {}
a_ = """gelu"""
def __init__( self : Tuple ,_a : List[str] ,_a : Union[str, Any]=13 ,_a : int=7 ,_a : List[Any]=True ,_a : Optional[Any]=False ,_a : List[str]=99 ,_a : Any=16 ,_a : List[str]=2 ,_a : Union[str, Any]=4 ,_a : Optional[int]=4 ,_a : int="gelu" ,_a : Optional[Any]=0.1 ,_a : str=0.1 ,_a : int=20 ,_a : Any=2 ,_a : int=1 ,_a : Union[str, Any]=0 ,_a : Tuple=16 ,_a : Optional[Any]=16 ,):
'''simple docstring'''
A_ : List[Any] = parent
A_ : Dict = batch_size
A_ : Optional[int] = seq_length
A_ : str = is_training
A_ : Any = use_labels
A_ : Tuple = vocab_size
A_ : Optional[int] = hidden_size
A_ : List[Any] = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Tuple = hidden_act
A_ : Tuple = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Optional[Any] = max_position_embeddings
A_ : Union[str, Any] = eos_token_id
A_ : Optional[Any] = pad_token_id
A_ : List[str] = bos_token_id
A_ : List[Any] = embed_dim
A_ : Optional[int] = word_embed_proj_dim
A_ : Union[str, Any] = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size )
A_ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) ,1 )
A_ : Any = tf.concat([input_ids, eos_tensor] ,axis=1 )
A_ : Optional[int] = self.config_cls(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,embed_dim=self.embed_dim ,word_embed_proj_dim=self.word_embed_proj_dim ,is_encoder_decoder=_a ,**self.config_updates ,)
A_ : Any = prepare_opt_inputs_dict(_a ,_a )
return config, inputs_dict
def _a ( self : Optional[int] ,_a : Union[str, Any] ,_a : str ):
'''simple docstring'''
A_ : List[str] = TFOPTModel(config=_a )
A_ : Optional[int] = inputs_dict["""input_ids"""]
A_ : List[Any] = input_ids[:1, :]
A_ : Dict = inputs_dict["""attention_mask"""][:1, :]
A_ : str = 1
# first forward pass
A_ : Optional[int] = model(_a ,attention_mask=_a ,use_cache=_a )
A_ , A_ : Optional[Any] = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
A_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : Union[str, Any] = tf.cast(ids_tensor((self.batch_size, 3) ,2 ) ,tf.inta )
        # append the new tokens to input_ids and attention_mask
A_ : Union[str, Any] = tf.concat([input_ids, next_tokens] ,axis=-1 )
A_ : Dict = tf.concat([attention_mask, next_attn_mask] ,axis=-1 )
A_ : Union[str, Any] = model(_a ,attention_mask=_a )[0]
A_ : Union[str, Any] = model(_a ,attention_mask=_a ,past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1] )
# select random slice
A_ : List[str] = int(ids_tensor((1,) ,output_from_past.shape[-1] ) )
A_ : int = output_from_no_past[:, -3:, random_slice_idx]
A_ : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a ,_a ,rtol=1e-3 )
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
a_ = (TFOPTForCausalLM,) if is_tf_available() else ()
a_ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
a_ = False
a_ = False
a_ = False
a_ = 10
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Dict = TFOPTModelTester(self )
A_ : Optional[Any] = ConfigTester(self ,config_class=_a )
def _a ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def _a ( self : Any ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(_a : str ,_a : Optional[int] ):
if hasattr(_a ,"""weight""" ):
return embedding_layer.weight
else:
                # Build the word embedding weights if they do not exist yet,
                # then retry fetching the attribute once the layer is built.
model.build()
if hasattr(_a ,"""weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
A_ : str = model_class(config=_a )
A_ : str = _get_word_embedding_weight(_a ,model.get_input_embeddings() )
A_ : Union[str, Any] = _get_word_embedding_weight(_a ,model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(_a )
A_ : Tuple = _get_word_embedding_weight(_a ,model.get_input_embeddings() )
A_ : List[str] = _get_word_embedding_weight(_a ,model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
A_ : int = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] ,_a )
# check that weights remain the same after resizing
A_ : List[str] = True
for pa, pa in zip(old_input_embeddings.value() ,new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
A_ : Any = False
self.assertTrue(_a )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] ,_a )
A_ : Optional[Any] = True
for pa, pa in zip(old_output_embeddings.value() ,new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
A_ : Tuple = False
self.assertTrue(_a )
def lowerCamelCase ( lowerCamelCase : List[str]):
return tf.constant(lowerCamelCase , dtype=tf.intaa)
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = 99
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[Any] = tf.ones((4, 1) ,dtype=tf.intaa ) * 2
A_ : Optional[Any] = tf.concat([ids_tensor((4, 6) ,self.vocab_size - 3 ) + 3, eos_column_vector] ,axis=1 )
A_ : Optional[int] = input_ids.shape[0]
A_ : List[Any] = OPTConfig(
vocab_size=self.vocab_size ,hidden_size=24 ,num_hidden_layers=2 ,num_attention_heads=2 ,ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
A_ : Optional[int] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
A_ : Dict = tf.not_equal(_a ,model.config.pad_token_id )
with tf.GradientTape():
A_ : Union[str, Any] = model(input_ids=_a ,attention_mask=_a ).last_hidden_state
A_ : Dict = (1, 11, 512)
self.assertEqual(output.shape ,_a )
A_ : int = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] ,_a ,atol=4e-3 ) )
A_ : List[str] = tf.function(_a ,jit_compile=_a )
A_ : List[str] = xla_generate(_a ,_a )[0]
self.assertTrue(np.allclose(output[:, :3, :3] ,_a ,atol=4e-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().setUp()
A_ : Optional[int] = """facebook/opt-350m"""
def _a ( self : int ):
'''simple docstring'''
A_ : List[Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
A_ : Optional[Any] = GPTaTokenizer.from_pretrained(self.path_model )
A_ : List[str] = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
A_ : str = tokenizer(_a ,return_tensors="""tf""" ,padding=_a ,add_special_tokens=_a )
A_ : List[Any] = tf.math.reduce_mean(model(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
A_ : Optional[Any] = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(_a ,_a ,atol=1e-4 ) )
A_ : str = tf.function(_a ,jit_compile=_a )
A_ : str = tf.math.reduce_mean(xla_generate(inputs.input_ids ,attention_mask=inputs.attention_mask )[0] ,axis=-1 )
self.assertTrue(np.allclose(_a ,_a ,atol=1e-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def _a ( self : List[str] ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _a ( self : int ):
'''simple docstring'''
A_ : List[str] = """facebook/opt-125m"""
A_ : str = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
A_ : List[str] = []
A_ : Dict = GPTaTokenizer.from_pretrained(_a )
A_ : str = TFOPTForCausalLM.from_pretrained(_a )
for prompt in self.prompts:
A_ : List[Any] = tokenizer(_a ,return_tensors="""tf""" ).input_ids
A_ : Dict = model.generate(_a ,max_length=10 )
A_ : Any = tokenizer.batch_decode(_a ,skip_special_tokens=_a )
predicted_outputs += generated_string
self.assertListEqual(_a ,_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Any = """facebook/opt-350m"""
A_ : Dict = GPTaTokenizer.from_pretrained(_a )
A_ : int = TFOPTForCausalLM.from_pretrained(_a )
A_ : Dict = """left"""
# use different length sentences to test batching
A_ : str = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A_ : Optional[Any] = tokenizer(_a ,return_tensors="""tf""" ,padding=_a )
A_ : Optional[int] = inputs["""input_ids"""]
A_ : Union[str, Any] = model.generate(input_ids=_a ,attention_mask=inputs["""attention_mask"""] )
A_ : Optional[int] = tokenizer(sentences[0] ,return_tensors="""tf""" ).input_ids
A_ : str = model.generate(input_ids=_a )
A_ : int = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] ,tf.intaa ) )
A_ : List[Any] = tokenizer(sentences[1] ,return_tensors="""tf""" ).input_ids
A_ : Tuple = model.generate(input_ids=_a ,max_length=model.config.max_length - num_paddings )
A_ : Optional[Any] = tokenizer.batch_decode(_a ,skip_special_tokens=_a )
A_ : Optional[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=_a )
A_ : List[str] = tokenizer.decode(output_padded[0] ,skip_special_tokens=_a )
A_ : List[str] = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(_a ,_a )
self.assertListEqual(_a ,[non_padded_sentence, padded_sentence] )
def _a ( self : str ):
'''simple docstring'''
A_ : Any = """facebook/opt-350m"""
A_ : List[Any] = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
A_ : Tuple = []
A_ : List[Any] = GPTaTokenizer.from_pretrained(_a )
A_ : str = TFOPTForCausalLM.from_pretrained(_a )
for prompt in self.prompts:
A_ : Optional[int] = tokenizer(_a ,return_tensors="""tf""" ).input_ids
A_ : Dict = model.generate(_a ,max_length=10 )
A_ : Dict = tokenizer.batch_decode(_a ,skip_special_tokens=_a )
predicted_outputs += generated_string
self.assertListEqual(_a ,_a )
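# Minimal generation sketch mirroring the tests above (GPTaTokenizer is the
# obfuscated GPT2Tokenizer; the checkpoint name is taken from the tests):
from transformers import GPT2Tokenizer, TFOPTForCausalLM
tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
inputs = tokenizer("Today is a beautiful day and", return_tensors="tf")
output_ids = model.generate(inputs.input_ids, max_length=10)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))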
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : int | None = None ):
'''simple docstring'''
A_ : Any = value
A_ : Dict = random()
A_ : Node | None = None
A_ : Node | None = None
def __repr__( self : Optional[Any] ):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f'\'{self.value}: {self.prior:.5}\''
else:
return pformat(
{f'{self.value}: {self.prior:.5}': (self.left, self.right)} ,indent=1 )
def __str__( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[str] = str(self.value ) + """ """
A_ : List[str] = str(self.left or """""" )
A_ : List[str] = str(self.right or """""" )
return value + left + right
def lowerCamelCase ( lowerCamelCase : Node | None , lowerCamelCase : int):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
A_ , A_ : Optional[Any] = split(root.left , lowerCamelCase)
return left, root
else:
A_ , A_ : Union[str, Any] = split(root.right , lowerCamelCase)
return root, right
def lowerCamelCase ( lowerCamelCase : Node | None , lowerCamelCase : Node | None):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
A_ : List[str] = merge(left.right , lowerCamelCase)
return left
else:
A_ : Optional[Any] = merge(lowerCamelCase , right.left)
return right
def lowerCamelCase ( lowerCamelCase : Node | None , lowerCamelCase : int):
A_ : Optional[int] = Node(lowerCamelCase)
A_ , A_ : List[str] = split(lowerCamelCase , lowerCamelCase)
return merge(merge(lowerCamelCase , lowerCamelCase) , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Node | None , lowerCamelCase : int):
A_ , A_ : Tuple = split(lowerCamelCase , value - 1)
A_ , A_ : int = split(lowerCamelCase , lowerCamelCase)
return merge(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Node | None):
if not root: # None
return
else:
inorder(root.left)
print(root.value , end=""",""")
inorder(root.right)
def lowerCamelCase ( lowerCamelCase : Node | None , lowerCamelCase : str):
for arg in args.split():
if arg[0] == "+":
A_ : List[str] = insert(lowerCamelCase , int(arg[1:]))
elif arg[0] == "-":
A_ : List[str] = erase(lowerCamelCase , int(arg[1:]))
else:
print("""Unknown command""")
return root
def lowerCamelCase ( ):
A_ : int = None
print(
"""enter numbers to create a tree, + value to add value into treap, """
"""- value to erase all nodes with value. 'q' to quit. """)
A_ : str = input()
while args != "q":
A_ : List[Any] = interact_treap(lowerCamelCase , lowerCamelCase)
print(lowerCamelCase)
A_ : str = input()
print("""good by!""")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
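# Hedged sketch, assuming de-obfuscated names insert/erase/inorder for the
# treap helpers above (the snippet collapses them all to lowerCamelCase):
root = None
for x in [5, 3, 8, 1]:
    root = insert(root, x)
inorder(root)           # prints 1,3,5,8, -- inorder traversal is sorted
root = erase(root, 3)
inorder(root)           # prints 1,5,8,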
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
__magic_name__ = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DPTFeatureExtractor']
__magic_name__ = ['DPTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DPTForDepthEstimation',
'DPTForSemanticSegmentation',
'DPTModel',
'DPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
def topological_sort(graph):
    # Kahn's algorithm: repeatedly emit vertices whose indegree drops to 0.
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("""Cycle exists""")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
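# For the sample DAG above this prints [0, 1, 2, 3, 4, 5].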
| 665 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """vit_msn"""
def __init__( self : Optional[Any] ,_a : List[Any]=768 ,_a : List[str]=12 ,_a : str=12 ,_a : Any=3072 ,_a : List[Any]="gelu" ,_a : int=0.0 ,_a : Optional[int]=0.0 ,_a : Optional[int]=0.02 ,_a : Optional[Any]=1e-06 ,_a : Optional[int]=224 ,_a : Union[str, Any]=16 ,_a : List[Any]=3 ,_a : str=True ,**_a : Optional[Any] ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[Any] = hidden_size
A_ : str = num_hidden_layers
A_ : str = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = initializer_range
A_ : List[Any] = layer_norm_eps
A_ : Union[str, Any] = image_size
A_ : List[Any] = patch_size
A_ : Dict = num_channels
A_ : int = qkv_bias
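# Illustrative construction (the class above is the obfuscated ViTMSNConfig;
# the kwargs mirror its __init__ defaults):
from transformers import ViTMSNConfig
config = ViTMSNConfig(image_size=224, patch_size=16, num_channels=3)
print(config.model_type)  # "vit_msn"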
| 665 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and attention mask
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
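# For reference, a minimal hedged sketch of the RoPE-scaling setup this test exercises.
# The {"type", "factor"} dict layout is taken from the test above; the tiny config sizes
# below are illustrative assumptions, not the tester's exact values.
# from transformers import LlamaConfig, LlamaModel
# config = LlamaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
# config.rope_scaling = {"type": "linear", "factor": 10.0}  # or {"type": "dynamic", ...}
# scaled_model = LlamaModel(config)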
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 1 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
    help='Whether to use relative position embeddings or not. Defaults to False; pass the flag to enable them.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
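# Hedged CLI sketch (script name and paths are hypothetical placeholders):
# python convert_tapas_checkpoint.py \
#     --task WTQ \
#     --tf_checkpoint_path /path/to/model.ckpt \
#     --tapas_config_file /path/to/tapas_config.json \
#     --pytorch_dump_path /path/to/output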
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
| 665 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__magic_name__ = True
except (ImportError, ModuleNotFoundError):
__magic_name__ = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase ( lowerCamelCase : str):
re.sub("""<n>""" , """""" , lowerCamelCase) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowerCamelCase))
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
| 665 | 1 |
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]):
# Validation
if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(lowerCamelCase) == 0:
return 0
if min(lowerCamelCase) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(lowerCamelCase) >= 366:
raise ValueError("""All days elements should be less than 366""")
A_ : Tuple = set(lowerCamelCase)
@functools.cache
def dynamic_programming(lowerCamelCase : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
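# Worked example (hedged): a 1-day pass for day 1 (cost 2), a 7-day pass covering
# days 4-8 (cost 7), and a 1-day pass for day 20 (cost 2) are optimal here.
print(lowerCamelCase([1, 4, 6, 7, 8, 20], [2, 7, 15])) # expected: 11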
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
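# pair of sequences: [CLS] A [SEP] B [SEP]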
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = [0] * len(lowerCamelCase)
A_ : Union[str, Any] = []
A_ : Union[str, Any] = []
A_ : Tuple = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase)):
if indegree[i] == 0:
queue.append(lowerCamelCase)
while queue:
A_ : Any = queue.pop(0)
cnt += 1
topo.append(lowerCamelCase)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase)
if cnt != len(lowerCamelCase):
print("""Cycle exists""")
else:
print(lowerCamelCase)
# Adjacency List of Graph
__magic_name__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
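# Expected output for the adjacency list above: [0, 1, 2, 3, 4, 5]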
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 | 1 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Union[str, Any] = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 18, 2]
A_ : Optional[int] = True if """large""" in model_name or """huge""" in model_name else False
A_ : Optional[int] = True if """large""" in model_name or """huge""" in model_name else False
A_ : Optional[int] = True if """large""" in model_name or """huge""" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
A_ : List[Any] = [3, 3, 3, 3]
A_ : int = [5, 5, 5, 5]
elif "fl4" in model_name:
A_ : int = [4, 4, 4, 4]
A_ : List[str] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
A_ : List[str] = [3, 3, 3, 3]
if "lrf" in model_name:
A_ : Dict = [3, 3, 3, 3]
else:
A_ : str = [2, 2, 2, 2]
if "tiny" in model_name:
A_ : List[Any] = 96
elif "small" in model_name:
A_ : Any = 96
elif "base" in model_name:
A_ : Dict = 128
elif "large" in model_name:
A_ : Tuple = 192
elif "xlarge" in model_name:
A_ : Optional[Any] = 256
elif "huge" in model_name:
A_ : Dict = 352
# set label information
A_ : Optional[Any] = """huggingface/label-files"""
if "large" in model_name or "huge" in model_name:
A_ : str = """imagenet-22k-id2label.json"""
else:
A_ : List[Any] = """imagenet-1k-id2label.json"""
A_ : str = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : Any = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Dict = {v: k for k, v in idalabel.items()}
A_ : Union[str, Any] = FocalNetConfig(
embed_dim=lowerCamelCase , depths=lowerCamelCase , focal_levels=lowerCamelCase , focal_windows=lowerCamelCase , use_conv_embed=lowerCamelCase , idalabel=lowerCamelCase , labelaid=lowerCamelCase , use_post_layernorm=lowerCamelCase , use_layerscale=lowerCamelCase , )
return config
def lowerCamelCase ( lowerCamelCase : List[str]):
if "patch_embed.proj" in name:
A_ : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""")
if "patch_embed.norm" in name:
A_ : List[str] = name.replace("""patch_embed.norm""" , """embeddings.norm""")
if "layers" in name:
A_ : Union[str, Any] = """encoder.""" + name
if "encoder.layers" in name:
A_ : Optional[Any] = name.replace("""encoder.layers""" , """encoder.stages""")
if "downsample.proj" in name:
A_ : Optional[Any] = name.replace("""downsample.proj""" , """downsample.projection""")
if "blocks" in name:
A_ : int = name.replace("""blocks""" , """layers""")
if "modulation.f.weight" in name or "modulation.f.bias" in name:
A_ : Optional[int] = name.replace("""modulation.f""" , """modulation.projection_in""")
if "modulation.h.weight" in name or "modulation.h.bias" in name:
A_ : Optional[int] = name.replace("""modulation.h""" , """modulation.projection_context""")
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
A_ : Any = name.replace("""modulation.proj""" , """modulation.projection_out""")
if name == "norm.weight":
A_ : Tuple = """layernorm.weight"""
if name == "norm.bias":
A_ : Dict = """layernorm.bias"""
if "head" in name:
A_ : int = name.replace("""head""" , """classifier""")
else:
A_ : Dict = """focalnet.""" + name
return name
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Dict , lowerCamelCase : Any=False):
# fmt: off
A_ : Optional[int] = {
"""focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""",
"""focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""",
"""focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""",
"""focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""",
"""focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""",
"""focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""",
"""focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""",
"""focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""",
"""focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""",
"""focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""",
}
# fmt: on
A_ : Union[str, Any] = model_name_to_url[model_name]
print("""Checkpoint URL: """ , lowerCamelCase)
A_ : Optional[Any] = torch.hub.load_state_dict_from_url(lowerCamelCase , map_location="""cpu""")["""model"""]
# rename keys
for key in state_dict.copy().keys():
A_ : Union[str, Any] = state_dict.pop(lowerCamelCase)
A_ : str = val
A_ : List[str] = get_focalnet_config(lowerCamelCase)
A_ : Dict = FocalNetForImageClassification(lowerCamelCase)
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase)
# verify conversion
A_ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : List[Any] = BitImageProcessor(
do_resize=lowerCamelCase , size={"""shortest_edge""": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase , crop_size=224 , do_normalize=lowerCamelCase , image_mean=lowerCamelCase , image_std=lowerCamelCase , )
A_ : List[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
A_ : Optional[Any] = processor(images=lowerCamelCase , return_tensors="""pt""")
A_ : Optional[int] = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225]),
])
A_ : Dict = image_transforms(lowerCamelCase).unsqueeze(0)
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase , atol=1E-4)
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits.argmax(-1).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx])
print("""First values of logits:""" , outputs.logits[0, :3])
if model_name == "focalnet-tiny":
A_ : Dict = torch.tensor([0.2166, -0.4368, 0.2191])
elif model_name == "focalnet-tiny-lrf":
A_ : Any = torch.tensor([1.1669, 0.0125, -0.1695])
elif model_name == "focalnet-small":
A_ : List[str] = torch.tensor([0.4917, -0.0430, 0.1341])
elif model_name == "focalnet-small-lrf":
A_ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331])
elif model_name == "focalnet-base":
A_ : Tuple = torch.tensor([-0.1655, -0.4090, -0.1730])
elif model_name == "focalnet-base-lrf":
A_ : Optional[int] = torch.tensor([0.5306, -0.0483, -0.3928])
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4)
print("""Looks ok!""")
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
processor.save_pretrained(lowerCamelCase)
if push_to_hub:
print(F'Pushing model and processor of {model_name} to the hub...')
model.push_to_hub(F'{model_name}')
processor.push_to_hub(F'{model_name}')
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
__magic_name__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
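# Hedged CLI sketch (script name and output directory are hypothetical placeholders):
# python convert_focalnet.py --model_name focalnet-tiny --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub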
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
| 665 | 1 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : int):
assert isinstance(lowerCamelCase , lowerCamelCase)
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True])
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : Any):
A_ : Any = tmp_path / """cache"""
A_ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : Optional[int] = ParquetDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase).read()
_check_parquet_dataset(lowerCamelCase , lowerCamelCase)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any]):
A_ : List[str] = tmp_path / """cache"""
A_ : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Dict = features.copy() if features else default_expected_features
A_ : int = (
Features({feature: Value(lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
A_ : str = ParquetDatasetReader(lowerCamelCase , features=lowerCamelCase , cache_dir=lowerCamelCase).read()
_check_parquet_dataset(lowerCamelCase , lowerCamelCase)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train"""), """train""", """test"""])
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : List[str]):
A_ : str = tmp_path / """cache"""
A_ : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : List[Any] = ParquetDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase , split=lowerCamelCase).read()
_check_parquet_dataset(lowerCamelCase , lowerCamelCase)
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list])
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Optional[int]):
if issubclass(lowerCamelCase , lowerCamelCase):
A_ : Dict = parquet_path
elif issubclass(lowerCamelCase , lowerCamelCase):
A_ : Tuple = [parquet_path]
A_ : Optional[Any] = tmp_path / """cache"""
A_ : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Tuple = ParquetDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase).read()
_check_parquet_dataset(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any]=("train",)):
assert isinstance(lowerCamelCase , lowerCamelCase)
for split in splits:
A_ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True])
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Tuple):
A_ : Optional[int] = tmp_path / """cache"""
A_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : List[Any] = ParquetDatasetReader(
{"""train""": parquet_path} , cache_dir=lowerCamelCase , keep_in_memory=lowerCamelCase).read()
_check_parquet_datasetdict(lowerCamelCase , lowerCamelCase)
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : List[Any] , lowerCamelCase : Any):
A_ : int = tmp_path / """cache"""
A_ : List[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : Optional[Any] = features.copy() if features else default_expected_features
A_ : Optional[Any] = (
Features({feature: Value(lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
)
A_ : List[str] = ParquetDatasetReader({"""train""": parquet_path} , features=lowerCamelCase , cache_dir=lowerCamelCase).read()
_check_parquet_datasetdict(lowerCamelCase , lowerCamelCase)
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train"""), """train""", """test"""])
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : Any , lowerCamelCase : List[Any]):
if split:
A_ : List[str] = {split: parquet_path}
else:
A_ : Optional[Any] = """train"""
A_ : Optional[int] = {"""train""": parquet_path, """test""": parquet_path}
A_ : Optional[Any] = tmp_path / """cache"""
A_ : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
A_ : List[str] = ParquetDatasetReader(lowerCamelCase , cache_dir=lowerCamelCase).read()
_check_parquet_datasetdict(lowerCamelCase , lowerCamelCase , splits=list(path.keys()))
assert all(dataset[split].split == split for split in path.keys())
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : int):
A_ : List[str] = ParquetDatasetWriter(lowerCamelCase , tmp_path / """foo.parquet""")
assert writer.write() > 0
A_ : Optional[Any] = pq.ParquetFile(tmp_path / """foo.parquet""")
A_ : Union[str, Any] = pf.read()
assert dataset.data.table == output_table
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = str(shared_datadir / """test_image_rgb.jpg""")
A_ : Optional[int] = {"""image""": [image_path]}
A_ : Any = Features({"""image""": Image()})
A_ : List[str] = Dataset.from_dict(lowerCamelCase , features=lowerCamelCase)
A_ : Optional[int] = ParquetDatasetWriter(lowerCamelCase , tmp_path / """foo.parquet""")
assert writer.write() > 0
A_ : Optional[int] = Dataset.from_parquet(str(tmp_path / """foo.parquet"""))
assert dataset.features == reloaded_dataset.features
A_ : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""") , streaming=lowerCamelCase).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"""feature, expected""" , [
(Features({"""foo""": Value("""int32""")}), None),
(Features({"""image""": Image(), """foo""": Value("""int32""")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"""nested""": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
assert get_writer_batch_size(lowerCamelCase) == expected
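# Hedged sketch of the round-trip these tests exercise (the file name is a placeholder);
# Dataset.to_parquet / Dataset.from_parquet wrap ParquetDatasetWriter / ParquetDatasetReader:
# from datasets import Dataset
# ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]})
# ds.to_parquet("tmp.parquet")
# reloaded = Dataset.from_parquet("tmp.parquet")
# assert reloaded.column_names == ["col_1", "col_2", "col_3"]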
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
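# e.g. the word ("l", "o", "w") yields the symbol pairs {("l", "o"), ("o", "w")}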
return pairs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(bpe_tokens ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
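# A minimal, self-contained sketch of the byte-level BPE merge flow implemented
# above; toy_bpe and TOY_MERGES are hypothetical names used only for illustration,
# not part of the tokenizer's API.
def toy_bpe(word: str, ranks: dict) -> str:
    symbols = tuple(word)
    while len(symbols) > 1:
        pairs = {(symbols[i], symbols[i + 1]) for i in range(len(symbols) - 1)}
        # pick the pair with the lowest merge rank, exactly as the bpe() loop does
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break
        merged, i = [], 0
        while i < len(symbols):
            if i < len(symbols) - 1 and (symbols[i], symbols[i + 1]) == best:
                merged.append(symbols[i] + symbols[i + 1])
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = tuple(merged)
    return " ".join(symbols)

TOY_MERGES = {("l", "o"): 0, ("lo", "w"): 1}
assert toy_bpe("low", TOY_MERGES) == "low"
assert toy_bpe("lowest", TOY_MERGES) == "low e s t"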
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : list , lowerCamelCase : int , lowerCamelCase : int = 0 , lowerCamelCase : int = 0):
A_ : Dict = right or len(lowerCamelCase) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(lowerCamelCase , lowerCamelCase , left + 1 , right - 1)
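# Usage sketch (parameter names list_data/key/left/right are assumed from the
# recursion above): the function compares both ends and recurses inward, i.e. a
# linear scan from both sides, so search([1, 3, 5, 7, 9], 7) returns 3 and a
# missing key returns -1 once left crosses right.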
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
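# Toy illustration of the segment-id layout computed above; the helper name and
# parameters are assumptions for the sketch, not ConvBERT API.
def toy_token_type_ids(len_a: int, len_b: int = None) -> list:
    first = [0] * (1 + len_a + 1)  # [CLS] + sentence A + [SEP] -> segment 0
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)  # sentence B + [SEP] -> segment 1

assert toy_token_type_ids(3) == [0, 0, 0, 0, 0]
assert toy_token_type_ids(3, 2) == [0, 0, 0, 0, 0, 1, 1, 1]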
| 665 | 1 |
'''simple docstring'''
from math import isqrt
def lowerCamelCase ( lowerCamelCase : int):
A_ : Tuple = [True] * max_number
for i in range(2 , isqrt(max_number - 1) + 1):
if is_prime[i]:
for j in range(i**2 , lowerCamelCase , i):
A_ : Optional[Any] = False
return [i for i in range(2 , lowerCamelCase) if is_prime[i]]
def lowerCamelCase ( lowerCamelCase : int = 10**8):
A_ : Union[str, Any] = calculate_prime_numbers(max_number // 2)
A_ : str = 0
A_ : int = 0
A_ : Union[str, Any] = len(lowerCamelCase) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
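# Hedged brute-force cross-check for the two-pointer count above; the function name
# is hypothetical. It counts pairs (p, q) with p <= q and p * q < limit, which
# mirrors what `semiprimes_count += right - left + 1` accumulates.
def count_semiprimes_brute(limit: int, primes: list) -> int:
    return sum(1 for i, p in enumerate(primes) for q in primes[i:] if p * q < limit)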
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
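# Layout note (sketch, not authoritative): BART wraps a pair as
#   <s> A </s></s> B </s>
# and the token-type method above returns all zeros in both branches, since BART
# does not use token type embeddings.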
| 665 | 1 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__magic_name__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__magic_name__ = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.1_5},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
__magic_name__ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__magic_name__ = 'facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
__magic_name__ = 'allenai'
def lowerCamelCase ( lowerCamelCase : Tuple):
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
A_ : Optional[Any] = dict((re.sub(r"""@@$""" , """""" , lowerCamelCase), v) if k.endswith("""@@""") else (re.sub(r"""$""" , """</w>""" , lowerCamelCase), v) for k, v in d.items())
A_ : Any = """<s> <pad> </s> <unk>""".split()
# restore the special tokens
for k in keep_keys:
del da[F'{k}</w>']
A_ : List[Any] = d[k] # restore
return da
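# Worked example of the rewrite above (illustrative dict):
#   {'le@@': 5, 'er': 7, '</s>': 2} -> {'le': 5, 'er</w>': 7, '</s>': 2}
# '@@' continuation markers are dropped, word-final pieces gain '</w>', and the
# four special tokens are restored without the '</w>' suffix.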
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]):
# prep
assert os.path.exists(lowerCamelCase)
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase)
print(F'Writing results to {pytorch_dump_folder_path}')
# handle various types of models
A_ : Dict = basename(lowerCamelCase)
A_ : Optional[int] = dirname(lowerCamelCase)
A_ : Any = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
A_ : str = cls.hub_models()
A_ : Any = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
A_ : str = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}')
A_ : List[str] = hub_utils.from_pretrained(
lowerCamelCase , lowerCamelCase , lowerCamelCase , archive_map=lowerCamelCase , **lowerCamelCase)
A_ : Tuple = vars(chkpt["""args"""]["""model"""])
A_ : Optional[int] = args["""source_lang"""]
A_ : Tuple = args["""target_lang"""]
A_ : List[Any] = dirname(lowerCamelCase)
A_ : Any = basename(lowerCamelCase)
# dicts
A_ : Optional[int] = os.path.join(lowerCamelCase , F'dict.{src_lang}.txt')
A_ : Any = os.path.join(lowerCamelCase , F'dict.{tgt_lang}.txt')
A_ : List[str] = Dictionary.load(lowerCamelCase)
A_ : Optional[int] = rewrite_dict_keys(src_dict.indices)
A_ : int = len(lowerCamelCase)
A_ : Union[str, Any] = os.path.join(lowerCamelCase , """vocab-src.json""")
print(F'Generating {src_vocab_file} with {src_vocab_size} {src_lang} records')
with open(lowerCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase))
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
A_ : Tuple = True
for k in src_vocab.keys():
if not k.islower():
A_ : Tuple = False
break
A_ : str = Dictionary.load(lowerCamelCase)
A_ : Optional[int] = rewrite_dict_keys(tgt_dict.indices)
A_ : Optional[int] = len(lowerCamelCase)
A_ : Dict = os.path.join(lowerCamelCase , """vocab-tgt.json""")
print(F'Generating {tgt_vocab_file} with {tgt_vocab_size} {tgt_lang} records')
with open(lowerCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase))
# merges_file (bpecodes)
A_ : Optional[int] = os.path.join(lowerCamelCase , VOCAB_FILES_NAMES["""merges_file"""])
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
A_ : Any = os.path.join(lowerCamelCase , lowerCamelCase)
if os.path.exists(lowerCamelCase):
break
with open(lowerCamelCase , encoding="""utf-8""") as fin:
A_ : str = fin.read()
A_ : int = re.sub(r""" \d+$""" , """""" , lowerCamelCase , 0 , re.M) # remove frequency number
print(F'Generating {merges_file}')
with open(lowerCamelCase , """w""" , encoding="""utf-8""") as fout:
fout.write(lowerCamelCase)
# model config
A_ : int = os.path.join(lowerCamelCase , """config.json""")
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
A_ : Any = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
A_ : Any = 5
A_ : List[Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
A_ : Tuple = best_score_hparams[model_dir]["""length_penalty"""]
else:
A_ : Optional[Any] = 1.0
print(F'Generating {fsmt_model_config_file}')
with open(lowerCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase))
# tokenizer config
A_ : Optional[Any] = os.path.join(lowerCamelCase , lowerCamelCase)
A_ : int = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 1024,
"""do_lower_case""": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}')
with open(lowerCamelCase , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(lowerCamelCase , ensure_ascii=lowerCamelCase , indent=lowerCamelCase))
# model
A_ : Dict = chkpt["""models"""][0]
A_ : int = model.state_dict()
# rename keys to start with 'model.'
A_ : Optional[Any] = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items())
# remove unneeded keys
A_ : List[str] = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase , lowerCamelCase)
A_ : str = FSMTConfig.from_pretrained(lowerCamelCase)
A_ : Dict = FSMTForConditionalGeneration(lowerCamelCase)
# check that it loads ok
model_new.load_state_dict(lowerCamelCase , strict=lowerCamelCase)
# save
A_ : Dict = os.path.join(lowerCamelCase , lowerCamelCase)
print(F'Generating {pytorch_weights_dump_path}')
torch.save(lowerCamelCase , lowerCamelCase)
print("""Conversion is done!""")
print("""\nLast step is to upload the files to s3""")
print(F'cd {data_root}')
print(F'transformers-cli upload {model_dir}')
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 665 | 1 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__magic_name__ = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__magic_name__ = get_tests_dir('fixtures/vocab.json')
__magic_name__ = get_tests_dir('fixtures')
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = 0
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(_a ,_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Dict = WavaVecaConfig()
A_ : Optional[Any] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" )
# save in new folder
model_config.save_pretrained(_a )
processor.save_pretrained(_a )
A_ : str = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_a ,os.path.join(_a ,_a ) )
copyfile(_a ,os.path.join(_a ,"""vocab.json""" ) )
A_ : List[Any] = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : int = WavaVecaFeatureExtractor()
A_ : List[str] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
A_ : List[str] = WavaVecaProcessor(_a ,_a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in tokenizer
with open(os.path.join(_a ,_a ) ,"""r""" ) as f:
A_ : Dict = json.load(_a )
config_dict.pop("""processor_class""" )
with open(os.path.join(_a ,_a ) ,"""w""" ) as f:
f.write(json.dumps(_a ) )
A_ : Union[str, Any] = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : str = WavaVecaFeatureExtractor()
A_ : List[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" )
A_ : int = WavaVecaProcessor(_a ,_a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in feature extractor
with open(os.path.join(_a ,_a ) ,"""r""" ) as f:
A_ : Optional[int] = json.load(_a )
config_dict.pop("""processor_class""" )
with open(os.path.join(_a ,_a ) ,"""w""" ) as f:
f.write(json.dumps(_a ) )
A_ : Any = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Optional[Any] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" )
model_config.save_pretrained(_a )
# copy relevant files
copyfile(_a ,os.path.join(_a ,"""vocab.json""" ) )
# create empty sample processor
with open(os.path.join(_a ,_a ) ,"""w""" ) as f:
f.write("""{}""" )
A_ : Any = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : Tuple ):
'''simple docstring'''
with self.assertRaises(_a ):
A_ : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
A_ : Any = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=_a )
A_ : List[str] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=_a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
A_ : Optional[Any] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ ,"""NewFeatureExtractor""" )
A_ : int = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizerFast""" )
# Test we can also load the slow version
A_ : Any = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=_a ,use_fast=_a )
A_ : Tuple = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ ,"""NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ ,"""NewTokenizer""" )
def _a ( self : Optional[int] ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,_a )
AutoFeatureExtractor.register(_a ,_a )
AutoTokenizer.register(_a ,slow_tokenizer_class=_a )
AutoProcessor.register(_a ,_a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoProcessor.register(_a ,_a )
# Now that the config is registered, it can be used as any other config with the auto-API
A_ : Any = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : str = os.path.join(_a ,"""vocab.txt""" )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A_ : str = CustomTokenizer(_a )
A_ : List[Any] = CustomProcessor(_a ,_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_a )
A_ : Any = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self : Optional[Any] ):
'''simple docstring'''
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """AutoFeatureExtractor"""
a_ = """AutoTokenizer"""
a_ = False
try:
AutoConfig.register("""custom""" ,_a )
AutoFeatureExtractor.register(_a ,_a )
AutoTokenizer.register(_a ,slow_tokenizer_class=_a )
AutoProcessor.register(_a ,_a )
# If remote code is not set, the default is to use local classes.
A_ : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
A_ : int = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
A_ : List[str] = AutoProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_processor""" ,trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ ,"""NewProcessor""" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(processor.__class__.__name__ ,"""BertTokenizerFast""" )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" )
self.assertEqual(processor.__class__.__name__ ,"""ConvNextImageProcessor""" )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""]
@classmethod
def _a ( cls : str ):
'''simple docstring'''
A_ : Tuple = TOKEN
HfFolder.save_token(_a )
@classmethod
def _a ( cls : List[Any] ):
'''simple docstring'''
try:
delete_repo(token=cls._token ,repo_id="""test-processor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""valid_org/test-processor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id="""test-dynamic-processor""" )
except HTTPError:
pass
def _a ( self : str ):
'''simple docstring'''
A_ : Union[str, Any] = WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a ,"""test-processor""" ) ,push_to_hub=_a ,use_auth_token=self._token )
A_ : Union[str, Any] = WavaVecaProcessor.from_pretrained(f'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a ,getattr(new_processor.feature_extractor ,_a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a ,"""test-processor-org""" ) ,push_to_hub=_a ,use_auth_token=self._token ,organization="""valid_org""" ,)
A_ : Any = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a ,getattr(new_processor.feature_extractor ,_a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() ,processor.tokenizer.get_vocab() )
def _a ( self : List[Any] ):
'''simple docstring'''
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
A_ : Tuple = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
A_ : Tuple = os.path.join(_a ,"""vocab.txt""" )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
A_ : int = CustomTokenizer(_a )
A_ : str = CustomProcessor(_a ,_a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'{USER}/test-dynamic-processor' ,token=self._token )
A_ : List[Any] = Repository(_a ,clone_from=f'{USER}/test-dynamic-processor' ,token=self._token )
processor.save_pretrained(_a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map ,{
"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""",
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} ,)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_a ,"""tokenizer_config.json""" ) ) as f:
A_ : List[str] = json.load(_a )
self.assertDictEqual(
tokenizer_config["""auto_map"""] ,{
"""AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None],
"""AutoProcessor""": """custom_processing.CustomProcessor""",
} ,)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_a ,"""custom_feature_extraction.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a ,"""custom_tokenization.py""" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a ,"""custom_processing.py""" ) ) )
repo.push_to_hub()
A_ : Any = AutoProcessor.from_pretrained(f'{USER}/test-dynamic-processor' ,trust_remote_code=_a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ ,"""CustomProcessor""" )
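# Minimal sketch of the registration flow exercised above (custom classes elided):
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
#   AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#   AutoProcessor.register(CustomConfig, CustomProcessor)
# after which AutoProcessor.from_pretrained resolves the custom classes like built-ins.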
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
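# e.g. assuming sample_size[1] == x_res, a 22050 Hz sample rate with hop_length=512
# gives pixels_per_second ~= 43, so mask_start_secs=0.5 preserves about 21 columns.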
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
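# Note on slerp above: theta is the angle between the flattened tensors; alpha=0
# returns the first input and alpha=1 the second, and intermediate alphas move along
# the great circle between them, which keeps Gaussian latents closer to unit norm
# than linear interpolation would.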
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
return int((input_a, input_a).count(0) != 0)
def lowerCamelCase ( ):
assert nand_gate(0 , 0) == 1
assert nand_gate(0 , 1) == 1
assert nand_gate(1 , 0) == 1
assert nand_gate(1 , 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
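# e.g. batch_size=64 with MAX_GPU_BATCH_SIZE=16 yields gradient_accumulation_steps=4
# and a per-step micro-batch of 16, keeping the effective batch size at 64.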
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,*_a : Any ,_a : List[Any]=None ,_a : Tuple=None ,**_a : List[str] ):
'''simple docstring'''
super().__init__(*_a ,**_a )
A_ : Optional[int] = eval_examples
A_ : Tuple = post_process_function
def _a ( self : Optional[Any] ,_a : Optional[Dataset] = None ,_a : Any=None ,_a : Optional[List[str]] = None ,_a : str = "eval" ,**_a : Any ,):
'''simple docstring'''
A_ : List[Any] = gen_kwargs.copy()
A_ : List[Any] = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
A_ : Tuple = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
A_ : str = gen_kwargs
A_ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset
A_ : Tuple = self.get_eval_dataloader(_a )
A_ : int = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
A_ : Any = self.compute_metrics
A_ : str = None
A_ : Optional[int] = time.time()
A_ : Union[str, Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A_ : Union[str, Any] = eval_loop(
_a ,description="""Evaluation""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
finally:
A_ : Any = compute_metrics
A_ : Optional[int] = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
A_ : Optional[int] = self.post_process_function(_a ,_a ,_a )
A_ : Any = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
A_ : Tuple = metrics.pop(_a )
metrics.update(output.metrics )
else:
A_ : Union[str, Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
A_ : Optional[Any] = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,_a )
return metrics
def _a ( self : List[Any] ,_a : str ,_a : List[Any] ,_a : str=None ,_a : str = "test" ,**_a : Union[str, Any] ):
'''simple docstring'''
A_ : int = gen_kwargs.copy()
A_ : Dict = self.get_test_dataloader(_a )
# Temporarily disable metric computation; we will do it in the loop here.
A_ : str = self.compute_metrics
A_ : Any = None
A_ : Optional[Any] = time.time()
A_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
A_ : List[Any] = eval_loop(
_a ,description="""Prediction""" ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=_a ,metric_key_prefix=_a ,)
finally:
A_ : List[str] = compute_metrics
A_ : Optional[Any] = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
_a ,_a ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size ) ,) )
if self.post_process_function is None or self.compute_metrics is None:
return output
A_ : Tuple = self.post_process_function(_a ,_a ,_a ,"""predict""" )
A_ : Tuple = self.compute_metrics(_a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
A_ : Optional[int] = metrics.pop(_a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=_a )
| 665 |
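A quick, self-contained illustration of the `speed_metrics` helper the trainer snippet above relies on: it converts a wall-clock window into runtime and throughput entries keyed by the given prefix. The sample numbers are made up; the signature follows the call in the snippet above.

import time
from transformers.trainer_utils import speed_metrics

start = time.time()
time.sleep(0.1)  # stand-in for an evaluation loop
metrics = speed_metrics("eval", start, num_samples=64, num_steps=8)
print(metrics)  # eval_runtime, eval_samples_per_second, eval_steps_per_second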
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]):
# Validation
if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(lowerCamelCase) == 0:
return 0
if min(lowerCamelCase) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(lowerCamelCase) >= 366:
raise ValueError("""All days elements should be less than 366""")
A_ : Tuple = set(lowerCamelCase)
@functools.cache
def dynamic_programming(lowerCamelCase : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
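For readability, a minimal de-obfuscated sketch of the ticket-cost dynamic program above; the name `mincost_tickets` and the asserted value are assumptions, not part of the source.

import functools

def mincost_tickets(days: list, costs: list) -> int:
    """Minimum cost to cover all travel days with 1-, 7-, or 30-day passes."""
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)
        return min(
            costs[0] + dp(index + 1),   # buy a 1-day pass
            costs[1] + dp(index + 7),   # buy a 7-day pass
            costs[2] + dp(index + 30),  # buy a 30-day pass
        )

    return dp(1)

assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11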
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : list[int | str]):
create_state_space_tree(lowerCamelCase , [] , 0 , [0 for i in range(len(lowerCamelCase))])
def lowerCamelCase ( lowerCamelCase : list[int | str] , lowerCamelCase : list[int | str] , lowerCamelCase : int , lowerCamelCase : list[int] , ):
if index == len(lowerCamelCase):
print(lowerCamelCase)
return
for i in range(len(lowerCamelCase)):
if not index_used[i]:
current_sequence.append(sequence[i])
A_ : Optional[int] = True
create_state_space_tree(lowerCamelCase , lowerCamelCase , index + 1 , lowerCamelCase)
current_sequence.pop()
A_ : List[Any] = False
__magic_name__ = [3, 1, 2, 4]
generate_all_permutations(sequence)
__magic_name__ = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 665 |
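A readable sketch of the backtracking permutation generator above, under the assumption that the obfuscated names stand for `generate_all_permutations` and a recursive state-space helper.

def generate_all_permutations(sequence: list) -> None:
    def backtrack(current: list, used: list) -> None:
        if len(current) == len(sequence):
            print(current)
            return
        for i, item in enumerate(sequence):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                # undo the choice before trying the next branch
                current.pop()
                used[i] = False

    backtrack([], [False] * len(sequence))

generate_all_permutations([3, 1, 2])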
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ):
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if colsa != 1:
A_ : Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
if rowsa != rowsa:
A_ : Dict = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(lowerCamelCase)
if len(lowerCamelCase) != rowsa:
A_ : Union[str, Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F'matrix but received {len(lowerCamelCase)} and {rowsa}'
)
raise ValueError(lowerCamelCase)
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""")
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
A_ , A_ : int = table.shape
strictly_diagonally_dominant(lowerCamelCase)
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase):
A_ : List[Any] = []
for row in range(lowerCamelCase):
A_ : int = 0
for col in range(lowerCamelCase):
if col == row:
A_ : List[str] = table[row][col]
elif col == cols - 1:
A_ : str = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : Union[str, Any] = (temp + val) / denom
new_val.append(lowerCamelCase)
A_ : Tuple = new_val
return [float(lowerCamelCase) for i in new_val]
def lowerCamelCase ( lowerCamelCase : NDArray[floataa]):
A_ , A_ : Dict = table.shape
A_ : Union[str, Any] = True
for i in range(0 , lowerCamelCase):
A_ : str = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
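The snippet above implements Jacobi iteration with explicit loops; below is a compact vectorized sketch of the same update rule, with a strictly diagonally dominant test matrix chosen so the iteration converges. The function name is an assumption.

import numpy as np

def jacobi(a: np.ndarray, b: np.ndarray, x0: np.ndarray, iterations: int) -> np.ndarray:
    d = np.diag(a)            # diagonal entries
    r = a - np.diag(d)        # off-diagonal remainder
    x = x0.astype(float)
    for _ in range(iterations):
        x = (b - r @ x) / d   # x_{k+1} = D^{-1} (b - R x_k)
    return x

a = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])
print(jacobi(a, b, np.zeros(3), 50))  # ~ np.linalg.solve(a, b)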
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str):
return "".join(chr(ord(lowerCamelCase) - 32) if """a""" <= char <= """z""" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 665 |
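A one-function sketch of the ASCII upper-casing trick above (subtracting 32 from a lowercase code point yields the uppercase letter); the name `to_upper` is assumed.

def to_upper(word: str) -> str:
    return "".join(chr(ord(c) - 32) if "a" <= c <= "z" else c for c in word)

assert to_upper("hello World!") == "HELLO WORLD!"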
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Any = len(lowerCamelCase)
A_ : Optional[Any] = len(lowerCamelCase)
A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)]
A_ : Union[str, Any] = True
for i in range(lowerCamelCase):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A_ : Optional[int] = True
if a[i].islower():
A_ : List[Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
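A readable sketch of the DP above, assuming it implements the classic "abbreviation" check: can `a` be turned into `b` by upper-casing some of its lowercase letters and deleting the remaining lowercase ones? Names and test values are assumptions.

def abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    # dp[i][j]: first i chars of a can produce first j chars of b
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True   # match (possibly upper-cased)
                if a[i].islower():
                    dp[i + 1][j] = True       # delete the lowercase char
    return dp[n][m]

assert abbreviation("daBcd", "ABC") is True
assert abbreviation("dBcd", "ABC") is False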
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]=10):
A_ : Optional[Any] = []
for _ in range(lowerCamelCase):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
return lrs
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : str=10):
A_ : str = []
for step in range(lowerCamelCase):
lrs.append(scheduler.get_lr()[0])
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Optional[int] = os.path.join(lowerCamelCase , """schedule.bin""")
torch.save(scheduler.state_dict() , lowerCamelCase)
A_ : Dict = torch.load(lowerCamelCase)
scheduler.load_state_dict(lowerCamelCase)
return lrs
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Optional[int] ,_a : List[str] ,_a : Tuple ,_a : List[str] ):
'''simple docstring'''
self.assertEqual(len(_a ) ,len(_a ) )
for a, b in zip(_a ,_a ):
self.assertAlmostEqual(_a ,_a ,delta=_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[Any] = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=_a )
A_ : Optional[int] = torch.tensor([0.4, 0.2, -0.5] )
A_ : Union[str, Any] = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ : Optional[int] = AdamW(params=[w] ,lr=2e-1 ,weight_decay=0.0 )
for _ in range(100 ):
A_ : Union[str, Any] = criterion(_a ,_a )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1e-2 )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = torch.tensor([0.1, -0.2, -0.1] ,requires_grad=_a )
A_ : str = torch.tensor([0.4, 0.2, -0.5] )
A_ : int = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
A_ : List[str] = Adafactor(
params=[w] ,lr=1e-2 ,eps=(1e-30, 1e-3) ,clip_threshold=1.0 ,decay_rate=-0.8 ,betaa=_a ,weight_decay=0.0 ,relative_step=_a ,scale_parameter=_a ,warmup_init=_a ,)
for _ in range(1000 ):
A_ : Union[str, Any] = criterion(_a ,_a )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors, so we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() ,[0.4, 0.2, -0.5] ,tol=1e-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = nn.Linear(50 , 50 ) if is_torch_available() else None
a_ = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
a_ = 10
def _a ( self : Any ,_a : List[Any] ,_a : Optional[Any] ,_a : str ,_a : Optional[int]=None ):
'''simple docstring'''
self.assertEqual(len(_a ) ,len(_a ) )
for a, b in zip(_a ,_a ):
self.assertAlmostEqual(_a ,_a ,delta=_a ,msg=_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[str] = {"""num_warmup_steps""": 2, """num_training_steps""": 10}
# schedulers dict format
# function: (sched_args_dict, expected_learning_rates)
A_ : Optional[int] = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"""num_warmup_steps""": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, """num_cycles""": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, """power""": 2.0, """lr_end""": 1e-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"""num_warmup_steps""": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
A_ , A_ : Any = data
A_ : Union[str, Any] = scheduler_func(self.optimizer ,**_a )
self.assertEqual(len([scheduler.get_lr()[0]] ) ,1 )
A_ : List[str] = unwrap_schedule(_a ,self.num_steps )
self.assertListAlmostEqual(
_a ,_a ,tol=1e-2 ,msg=f'failed for {scheduler_func} in normal scheduler' ,)
A_ : str = scheduler_func(self.optimizer ,**_a )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(_a ) # wrap to test picklability of the schedule
A_ : Tuple = unwrap_and_save_reload_schedule(_a ,self.num_steps )
self.assertListEqual(_a ,_a ,msg=f'failed for {scheduler_func} in save and reload' )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ):
'''simple docstring'''
A_ : int = fn
def __call__( self : Tuple ,*_a : List[str] ,**_a : Dict ):
'''simple docstring'''
return self.fn(*_a ,**_a )
@classmethod
def _a ( self : List[Any] ,_a : int ):
'''simple docstring'''
A_ : List[str] = list(map(self ,scheduler.lr_lambdas ) )
| 665 |
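A minimal stand-alone version of the `unwrap_schedule` pattern exercised in the test above: step a scheduler and record the learning rate at each step. It assumes a transformers version that still exports `AdamW`, as the test does; the expected list matches the linear-warmup row in the test data.

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(4, 4)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=2, num_training_steps=10
)
lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])  # same accessor the test uses
    scheduler.step()
print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]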
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
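A compact sketch of the 0-1 BFS technique above: weight-0 edges go to the front of the deque and weight-1 edges to the back, so vertices leave the deque in nondecreasing distance order. Names and the tiny graph are illustrative assumptions.

from collections import deque

def zero_one_bfs(adj: list, start: int, finish: int) -> int:
    dist = [None] * len(adj)
    dist[start] = 0
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v, w in adj[u]:          # w must be 0 or 1
            nd = dist[u] + w
            if dist[v] is None or nd < dist[v]:
                dist[v] = nd
                if w == 0:
                    queue.appendleft(v)  # same distance: front of deque
                else:
                    queue.append(v)      # distance + 1: back of deque
    if dist[finish] is None:
        raise ValueError("No path from start to finish.")
    return dist[finish]

# 0 --0--> 1 --1--> 2 and a direct 0 --1--> 2 edge
print(zero_one_bfs([[(1, 0), (2, 1)], [(2, 1)], []], 0, 2))  # 1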
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : list[list[float]]):
A_ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(lowerCamelCase):
if len(lowerCamelCase) < i + 1:
data_lists.append([])
data_lists[i].append(float(lowerCamelCase))
return data_lists
def lowerCamelCase ( lowerCamelCase : list[list[float]] , lowerCamelCase : list[int]):
A_ : list[list[float]] = []
for dlist, weight in zip(lowerCamelCase , lowerCamelCase):
A_ : int = min(lowerCamelCase)
A_ : Dict = max(lowerCamelCase)
A_ : list[float] = []
# for weight 0 the score is 1 - the normalized value
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)))
except ZeroDivisionError:
score.append(1)
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind))
except ZeroDivisionError:
score.append(0)
# weight not 0 or 1
else:
A_ : Optional[int] = F'Invalid weight of {weight:f} provided'
raise ValueError(lowerCamelCase)
score_lists.append(lowerCamelCase)
return score_lists
def lowerCamelCase ( lowerCamelCase : list[list[float]]):
A_ : list[float] = [0 for i in range(len(score_lists[0]))]
for slist in score_lists:
for j, ele in enumerate(lowerCamelCase):
A_ : List[str] = final_scores[j] + ele
return final_scores
def lowerCamelCase ( lowerCamelCase : list[list[float]] , lowerCamelCase : list[int]):
A_ : str = get_data(lowerCamelCase)
A_ : List[Any] = calculate_each_score(lowerCamelCase , lowerCamelCase)
A_ : Any = generate_final_scores(lowerCamelCase)
# append scores to source data
for i, ele in enumerate(lowerCamelCase):
source_data[i].append(lowerCamelCase)
return source_data
| 665 |
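A small sketch of the min-max scoring rule above: weight 1 means higher values score higher, weight 0 flips the normalized score. The `minmax_score` name is assumed.

def minmax_score(values: list, weight: int) -> list:
    if weight not in (0, 1):
        raise ValueError(f"Invalid weight of {weight:f} provided")
    lo, hi = min(values), max(values)
    span = hi - lo
    if span == 0:
        # degenerate column: mirror the ZeroDivisionError fallbacks above
        return [1.0 if weight == 0 else 0.0] * len(values)
    norm = [(v - lo) / span for v in values]
    return norm if weight == 1 else [1 - s for s in norm]

print(minmax_score([10.0, 20.0, 30.0], 1))  # [0.0, 0.5, 1.0]
print(minmax_score([10.0, 20.0, 30.0], 0))  # [1.0, 0.5, 0.0]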
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 | 1 |
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __lowerCAmelCase ( yaml.SafeLoader ):
'''simple docstring'''
def _a ( self : Tuple ,_a : List[Any] ):
'''simple docstring'''
A_ : List[str] = [self.constructed_objects[key_node] for key_node, _ in node.value]
A_ : int = [tuple(_a ) if isinstance(_a ,_a ) else key for key in keys]
A_ : Tuple = Counter(_a )
A_ : List[Any] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}' )
def _a ( self : Any ,_a : Any ,_a : Any=False ):
'''simple docstring'''
A_ : Dict = super().construct_mapping(_a ,deep=_a )
self._check_no_duplicates_on_constructed_node(_a )
return mapping
def lowerCamelCase ( lowerCamelCase : str):
A_ : str = list(readme_content.splitlines())
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
A_ : int = full_content[1:].index("""---""") + 1
A_ : str = """\n""".join(full_content[1:sep_idx])
return yamlblock, "\n".join(full_content[sep_idx + 1 :])
return None, "\n".join(lowerCamelCase)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def _a ( cls : Optional[int] ,_a : Path ):
'''simple docstring'''
with open(_a ,encoding="""utf-8""" ) as readme_file:
A_ , A_ : List[Any] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(_a )
else:
return cls()
def _a ( self : str ,_a : Path ):
'''simple docstring'''
if path.exists():
with open(_a ,encoding="""utf-8""" ) as readme_file:
A_ : List[Any] = readme_file.read()
else:
A_ : List[Any] = None
A_ : Optional[Any] = self._to_readme(_a )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as readme_file:
readme_file.write(_a )
def _a ( self : Any ,_a : Optional[str] = None ):
'''simple docstring'''
if readme_content is not None:
A_ , A_ : Optional[int] = _split_yaml_from_readme(_a )
A_ : List[str] = """---\n""" + self.to_yaml_string() + """---\n""" + content
else:
A_ : List[str] = """---\n""" + self.to_yaml_string() + """---\n"""
return full_content
@classmethod
def _a ( cls : int ,_a : str ):
'''simple docstring'''
A_ : List[Any] = yaml.load(_a ,Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
A_ : Union[str, Any] = {
(key.replace("""-""" ,"""_""" ) if key.replace("""-""" ,"""_""" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**_a )
def _a ( self : Tuple ):
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("""_""" ,"""-""" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} ,sort_keys=_a ,allow_unicode=_a ,encoding="""utf-8""" ,).decode("""utf-8""" )
__magic_name__ = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__magic_name__ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
__magic_name__ = ap.parse_args()
__magic_name__ = Path(args.readme_filepath)
__magic_name__ = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**12):
A_ : Optional[Any] = 1
A_ : Optional[Any] = 0
A_ : Any = 1
A_ : List[str] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__magic_name__ = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
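Many of the `__init__` snippets in this dump use the `_LazyModule` pattern; below is a toy illustration of the idea (not the real transformers implementation) that resolves attributes on first access.

import importlib
import types

class LazyModule(types.ModuleType):
    """Toy stand-in for a lazy module: import the submodule that defines
    an attribute only when that attribute is first accessed."""

    def __init__(self, name: str, attr_to_module: dict):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs once per name
        return value

lazy = LazyModule("demo", {"sqrt": "math", "dumps": "json"})
print(lazy.sqrt(9.0), lazy.dumps({"ok": True}))  # 3.0 {"ok": true}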
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : dict):
A_ : List[Any] = set()
# edges = list of graph's edges
A_ : Union[str, Any] = get_edges(lowerCamelCase)
# While there are still edges in the list, take an arbitrary edge
# (from_node, to_node), add both endpoints to chosen_vertices, and then
# remove all edges adjacent to from_node and to_node
while edges:
A_ , A_ : Dict = edges.pop()
chosen_vertices.add(lowerCamelCase)
chosen_vertices.add(lowerCamelCase)
for edge in edges.copy():
if from_node in edge or to_node in edge:
edges.discard(lowerCamelCase)
return chosen_vertices
def lowerCamelCase ( lowerCamelCase : dict):
A_ : int = set()
for from_node, to_nodes in graph.items():
for to_node in to_nodes:
edges.add((from_node, to_node))
return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 665 |
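A readable sketch of the matching-based 2-approximation above, assuming the obfuscated names stand for `matching_min_vertex_cover` and `get_edges` over an adjacency-list dict.

def matching_min_vertex_cover(graph: dict) -> set:
    edges = {(u, v) for u, nbrs in graph.items() for v in nbrs}
    cover = set()
    while edges:
        u, v = edges.pop()           # arbitrary remaining edge
        cover.add(u)
        cover.add(v)
        # drop every edge already covered by u or v
        edges = {e for e in edges if u not in e and v not in e}
    return cover

graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(matching_min_vertex_cover(graph))  # a valid cover; exact vertices depend on pop order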
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = [0] * len(lowerCamelCase)
A_ : Union[str, Any] = []
A_ : Union[str, Any] = []
A_ : Tuple = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowerCamelCase)):
if indegree[i] == 0:
queue.append(lowerCamelCase)
while queue:
A_ : Any = queue.pop(0)
cnt += 1
topo.append(lowerCamelCase)
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(lowerCamelCase)
if cnt != len(lowerCamelCase):
print("""Cycle exists""")
else:
print(lowerCamelCase)
# Adjacency List of Graph
__magic_name__ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 665 | 1 |
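A readable sketch of Kahn's algorithm as implemented above, using a real deque instead of `list.pop(0)`; the names are assumed de-obfuscations.

from collections import deque

def topological_sort(graph: dict) -> list:
    indegree = {u: 0 for u in graph}
    for nbrs in graph.values():
        for v in nbrs:
            indegree[v] += 1
    queue = deque(u for u, d in indegree.items() if d == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    if len(order) != len(graph):
        raise ValueError("Cycle exists")
    return order

print(topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))
# [0, 1, 2, 3, 4, 5]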
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """audio-spectrogram-transformer"""
def __init__( self : Dict ,_a : Optional[Any]=768 ,_a : List[Any]=12 ,_a : Optional[int]=12 ,_a : List[Any]=3072 ,_a : int="gelu" ,_a : Optional[int]=0.0 ,_a : Dict=0.0 ,_a : Any=0.02 ,_a : Tuple=1e-12 ,_a : Any=16 ,_a : List[Any]=True ,_a : List[str]=10 ,_a : Tuple=10 ,_a : str=1024 ,_a : Union[str, Any]=128 ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Any = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Tuple = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : Dict = hidden_act
A_ : List[Any] = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Any = initializer_range
A_ : Optional[Any] = layer_norm_eps
A_ : Tuple = patch_size
A_ : str = qkv_bias
A_ : List[str] = frequency_stride
A_ : Union[str, Any] = time_stride
A_ : Optional[Any] = max_length
A_ : str = num_mel_bins
| 665 |
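A minimal usage sketch for a `PretrainedConfig` subclass like the audio-spectrogram-transformer config above; `TinyConfig` and its `model_type` are hypothetical, not a registered model.

from transformers import PretrainedConfig

class TinyConfig(PretrainedConfig):
    model_type = "tiny-demo"  # hypothetical, not a registered model type

    def __init__(self, hidden_size=768, num_hidden_layers=12, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers

cfg = TinyConfig(hidden_size=256)
print(cfg.hidden_size, cfg.to_dict()["num_hidden_layers"])  # 256 12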
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and attention mask
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Any = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
a_ = (LlamaForCausalLM,) if is_torch_available() else ()
a_ = (
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 1 |
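A small sketch of the `rope_scaling` knob the scaling test above exercises; the dict shape follows the test, while the tiny model sizes are assumptions to keep instantiation cheap.

from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=128,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=64,
    max_position_embeddings=64,
    rope_scaling={"type": "linear", "factor": 10.0},  # shape taken from the test
)
model = LlamaModel(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy model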
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['YolosFeatureExtractor']
__magic_name__ = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
| 665 | 1 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """BlipImageProcessor"""
a_ = """AutoTokenizer"""
def __init__( self : Dict ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ):
'''simple docstring'''
super().__init__(_a ,_a )
# add QFormer tokenizer
A_ : Dict = qformer_tokenizer
def __call__( self : Dict ,_a : ImageInput = None ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
if images is None and text is None:
raise ValueError("""You have to specify at least images or text.""" )
A_ : Any = BatchFeature()
if text is not None:
A_ : Tuple = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
encoding.update(_a )
A_ : List[Any] = self.qformer_tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_token_type_ids=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
A_ : List[Any] = qformer_text_encoding.pop("""input_ids""" )
A_ : Dict = qformer_text_encoding.pop("""attention_mask""" )
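        # in the upstream InstructBLIP processor these become the qformer_input_ids / qformer_attention_mask entries of the encoding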
if images is not None:
A_ : List[Any] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : int ,*_a : List[str] ,**_a : Union[str, Any] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : List[Any] ,*_a : Optional[Any] ,**_a : Tuple ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Tuple = self.tokenizer.model_input_names
A_ : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _a ( self : Union[str, Any] ,_a : Union[str, Any] ,**_a : Dict ):
'''simple docstring'''
if os.path.isfile(_a ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_a ,exist_ok=_a )
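        # the Q-Former tokenizer is written to its own subfolder so that from_pretrained can reload it independently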
A_ : List[Any] = os.path.join(_a ,"""qformer_tokenizer""" )
self.qformer_tokenizer.save_pretrained(_a )
return super().save_pretrained(_a ,**_a )
@classmethod
def _a ( cls : Union[str, Any] ,_a : List[Any] ,**_a : Optional[int] ):
'''simple docstring'''
A_ : str = AutoTokenizer.from_pretrained(_a ,subfolder="""qformer_tokenizer""" )
A_ : Dict = cls._get_arguments_from_pretrained(_a ,**_a )
args.append(_a )
return cls(*_a )
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
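    # SentencePieceProcessor handles are not picklable: __getstate__ drops the model and __setstate__ reloads it from the vocab file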
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 665 | 1 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any ,_a : Optional[int] ,_a : Tuple=13 ,_a : Optional[int]=7 ,_a : List[str]=True ,_a : Optional[int]=True ,_a : str=True ,_a : Dict=True ,_a : List[Any]=99 ,_a : List[str]=64 ,_a : List[str]=32 ,_a : Tuple=5 ,_a : str=4 ,_a : List[Any]=37 ,_a : Union[str, Any]="gelu" ,_a : Dict=0.1 ,_a : Dict=0.1 ,_a : str=512 ,_a : List[str]=16 ,_a : Optional[Any]=2 ,_a : List[str]=0.02 ,_a : Optional[Any]=3 ,_a : List[Any]=4 ,_a : Optional[int]=None ,):
'''simple docstring'''
A_ : Dict = parent
A_ : Optional[Any] = batch_size
A_ : Dict = seq_length
A_ : List[str] = is_training
A_ : Optional[int] = use_input_mask
A_ : Dict = use_token_type_ids
A_ : Optional[Any] = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Union[str, Any] = embedding_size
A_ : List[Any] = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = hidden_act
A_ : int = hidden_dropout_prob
A_ : Union[str, Any] = attention_probs_dropout_prob
A_ : Tuple = max_position_embeddings
A_ : List[str] = type_vocab_size
A_ : List[Any] = type_sequence_label_size
A_ : List[str] = initializer_range
A_ : Any = num_labels
A_ : Dict = num_choices
A_ : Union[str, Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : Optional[int] = None
if self.use_input_mask:
A_ : str = random_attention_mask([self.batch_size, self.seq_length] )
A_ : List[str] = None
if self.use_token_type_ids:
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : Any = None
A_ : List[Any] = None
A_ : int = None
if self.use_labels:
A_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : Optional[int] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Tuple ):
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : List[str] ,_a : Any ,_a : Optional[int] ,_a : int ,_a : Optional[int] ,_a : int ,_a : List[Any] ,_a : str ):
'''simple docstring'''
A_ : str = MegatronBertModel(config=_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,token_type_ids=_a )
A_ : List[Any] = model(_a ,token_type_ids=_a )
A_ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) )
def _a ( self : List[str] ,_a : List[str] ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : Any ,_a : Any ,_a : Union[str, Any] ,_a : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = MegatronBertForMaskedLM(config=_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] ,_a : int ,_a : str ,_a : Tuple ,_a : Any ,_a : Any ,_a : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = MegatronBertForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Union[str, Any] ,_a : Dict ,_a : Optional[int] ,_a : int ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = MegatronBertForNextSentencePrediction(config=_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) )
def _a ( self : Any ,_a : Tuple ,_a : List[Any] ,_a : str ,_a : List[str] ,_a : int ,_a : Optional[Any] ,_a : int ):
'''simple docstring'''
A_ : List[Any] = MegatronBertForPreTraining(config=_a )
model.to(_a )
model.eval()
A_ : List[str] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,next_sentence_label=_a ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) )
def _a ( self : List[str] ,_a : Tuple ,_a : Any ,_a : Optional[Any] ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
A_ : Optional[Any] = MegatronBertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
A_ : str = model(
_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _a ( self : List[str] ,_a : str ,_a : Any ,_a : Union[str, Any] ,_a : Any ,_a : Dict ,_a : Tuple ,_a : Any ):
'''simple docstring'''
A_ : int = self.num_labels
A_ : List[Any] = MegatronBertForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : str = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : List[Any] ,_a : List[str] ,_a : Union[str, Any] ,_a : List[str] ,_a : Dict ,_a : Optional[int] ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
A_ : Any = self.num_labels
A_ : List[Any] = MegatronBertForTokenClassification(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Any ,_a : Tuple ,_a : Union[str, Any] ,_a : Dict ,_a : Union[str, Any] ,_a : Union[str, Any] ,_a : Tuple ,_a : Any ):
'''simple docstring'''
A_ : Optional[int] = self.num_choices
A_ : Optional[Any] = MegatronBertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
A_ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[Any] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : List[Any] = config_and_inputs
A_ : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = (
{
"""feature-extraction""": MegatronBertModel,
"""fill-mask""": MegatronBertForMaskedLM,
"""question-answering""": MegatronBertForQuestionAnswering,
"""text-classification""": MegatronBertForSequenceClassification,
"""text-generation""": MegatronBertForCausalLM,
"""token-classification""": MegatronBertForTokenClassification,
"""zero-shot""": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = True
# test_resize_embeddings = False
a_ = False
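    # the common model tests expect dummy labels for pretraining-style heads, so supply zero tensors of the right shape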
def _a ( self : Tuple ,_a : Optional[Any] ,_a : List[str] ,_a : Union[str, Any]=False ):
'''simple docstring'''
A_ : str = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
A_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=_a )
A_ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : str = MegatronBertModelTester(self )
A_ : Union[str, Any] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_a )
def lowerCamelCase ( lowerCamelCase : Tuple):
return torch.tensor(
lowerCamelCase , dtype=torch.long , device=lowerCamelCase , )
__magic_name__ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip("""Model is not available.""" )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = """nvidia/megatron-bert-uncased-345m"""
if "MYDIR" in os.environ:
A_ : int = os.path.join(os.environ["""MYDIR"""] ,_a )
A_ : List[str] = MegatronBertModel.from_pretrained(_a )
model.to(_a )
model.half()
A_ : Any = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
with torch.no_grad():
A_ : Optional[Any] = model(_a )[0]
A_ : List[Any] = torch.Size((1, 9, 1024) )
self.assertEqual(output.shape ,_a )
A_ : Dict = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
A_ : List[Any] = output[0, ii, jj]
A_ : Optional[Any] = expected[3 * ii + jj]
A_ : Any = """ii={} jj={} a={} b={}""".format(_a ,_a ,_a ,_a )
self.assertTrue(math.isclose(_a ,_a ,rel_tol=_a ,abs_tol=_a ) ,msg=_a )
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 | 1 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__magic_name__ = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase_ : Tuple=None):
    require_version(deps[lowerCamelCase] , lowerCamelCase_)
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
        A_ : Tuple = attention_softmax_in_fp32
        A_ : List[str] = scale_attention_softmax_in_fp32
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
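    # build a reversible byte -> unicode mapping that avoids whitespace and control characters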
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
return pairs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
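    # byte-pair encoding: repeatedly merge the adjacent pair with the best (lowest) merge rank until no known merge remains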
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : _a[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
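        # sentence pairs use the RoBERTa-style double separator: <s> A </s></s> B </s>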
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
| 665 | 1 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
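        # keep the backend normalizer in sync with any lowercase / strip_accents overrides passed at load time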
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
| 665 | 1 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int]):
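    # scale pixel bounding boxes into the 0-1000 coordinate range that LayoutLM-style models expect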
return [
int(1000 * (box[0] / width)),
int(1000 * (box[1] / height)),
int(1000 * (box[2] / width)),
int(1000 * (box[3] / height)),
]
def lowerCamelCase ( lowerCamelCase : np.ndarray , lowerCamelCase : Optional[str] , lowerCamelCase : Optional[str] = None):
A_ : Union[str, Any] = tesseract_config if tesseract_config is not None else """"""
# apply OCR
A_ : str = to_pil_image(lowerCamelCase)
A_ , A_ : Any = pil_image.size
A_ : Union[str, Any] = pytesseract.image_to_data(lowerCamelCase , lang=lowerCamelCase , output_type="""dict""" , config=lowerCamelCase)
A_ , A_ , A_ , A_ , A_ : Tuple = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
A_ : Optional[Any] = [idx for idx, word in enumerate(lowerCamelCase) if not word.strip()]
A_ : Dict = [word for idx, word in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
A_ : List[Any] = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
A_ : Tuple = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
A_ : Tuple = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
A_ : Dict = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
A_ : str = []
for x, y, w, h in zip(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase):
A_ : Optional[Any] = [x, y, x + w, y + h]
actual_boxes.append(lowerCamelCase)
# finally, normalize the bounding boxes
A_ : Any = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCamelCase , lowerCamelCase , lowerCamelCase))
assert len(lowerCamelCase) == len(lowerCamelCase), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : int ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : bool = True ,_a : Optional[str] = None ,_a : Optional[str] = "" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Optional[int] = size if size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[int] = get_size_dict(_a )
A_ : int = do_resize
A_ : Optional[Any] = size
A_ : Union[str, Any] = resample
A_ : Union[str, Any] = apply_ocr
A_ : int = ocr_lang
A_ : Any = tesseract_config
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BILINEAR ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
A_ : Tuple = (size["""height"""], size["""width"""])
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : Optional[int] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : Optional[str] = None ,_a : Optional[str] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : ChannelDimension = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : str = do_resize if do_resize is not None else self.do_resize
A_ : Optional[Any] = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a )
A_ : List[Any] = resample if resample is not None else self.resample
A_ : str = apply_ocr if apply_ocr is not None else self.apply_ocr
A_ : List[Any] = ocr_lang if ocr_lang is not None else self.ocr_lang
A_ : Any = tesseract_config if tesseract_config is not None else self.tesseract_config
A_ : Tuple = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
A_ : str = [to_numpy_array(_a ) for image in images]
if apply_ocr:
requires_backends(self ,"""pytesseract""" )
A_ : List[str] = []
A_ : Union[str, Any] = []
for image in images:
A_ , A_ : Dict = apply_tesseract(_a ,_a ,_a )
words_batch.append(_a )
boxes_batch.append(_a )
if do_resize:
A_ : List[str] = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
A_ : List[Any] = [flip_channel_order(_a ) for image in images]
A_ : Dict = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : Optional[Any] = BatchFeature(data={"""pixel_values""": images} ,tensor_type=_a )
if apply_ocr:
A_ : Optional[Any] = words_batch
A_ : int = boxes_batch
return data
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["""input_ids""", """attention_mask"""]
slow_tokenizer_class = BartTokenizer
def __init__( self : str ,vocab_file : Any=None ,merges_file : Optional[int]=None ,tokenizer_file : int=None ,errors : Optional[int]="replace" ,bos_token : Dict="<s>" ,eos_token : Optional[Any]="</s>" ,sep_token : Dict="</s>" ,cls_token : Tuple="<s>" ,unk_token : Optional[Any]="<unk>" ,pad_token : List[str]="<pad>" ,mask_token : int="<mask>" ,add_prefix_space : str=False ,trim_offsets : List[str]=True ,**kwargs : Dict ,):
'''simple docstring'''
super().__init__(
vocab_file ,merges_file ,tokenizer_file=tokenizer_file ,errors=errors ,bos_token=bos_token ,eos_token=eos_token ,sep_token=sep_token ,cls_token=cls_token ,unk_token=unk_token ,pad_token=pad_token ,mask_token=mask_token ,add_prefix_space=add_prefix_space ,trim_offsets=trim_offsets ,**kwargs ,)
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers ,pre_tok_state.pop("""type""" ) )
pre_tok_state["""add_prefix_space"""] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def mask_token( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def mask_token( self : Union[str, Any] ,value : Any ):
'''simple docstring'''
value = AddedToken(value ,lstrip=True ,rstrip=False ) if isinstance(value ,str ) else value
self._mask_token = value
def _batch_encode_plus( self : str ,*args : str ,**kwargs : Optional[int] ):
'''simple docstring'''
is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*args ,**kwargs )
def _encode_plus( self : str ,*args : List[Any] ,**kwargs : str ):
'''simple docstring'''
is_split_into_words = kwargs.get("""is_split_into_words""" ,False )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*args ,**kwargs )
def save_vocabulary( self : Optional[int] ,save_directory : str ,filename_prefix : Optional[str] = None ):
'''simple docstring'''
files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
return tuple(files )
def build_inputs_with_special_tokens( self : str ,token_ids_0 : Optional[int] ,token_ids_1 : int=None ):
'''simple docstring'''
output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return output
return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
def create_token_type_ids_from_sequences( self : Optional[int] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
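# Hedged usage sketch: the fast tokenizer above mirrors BartTokenizerFast, so
# the canonical checkpoint below (listed in the pretrained maps above) should
# exercise the same special-token logic; from_pretrained downloads the files.
from transformers import BartTokenizerFast

bart_tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
encoded = bart_tokenizer("Hello world")
assert encoded["input_ids"][0] == bart_tokenizer.bos_token_id   # <s> prepended
assert encoded["input_ids"][-1] == bart_tokenizer.eos_token_id  # </s> appended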
| 665 | 1 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __lowerCAmelCase ( AbstractFileSystem ):
'''simple docstring'''
a_ = """"""
a_ = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self : Optional[int] ,repo_info : Optional[DatasetInfo] = None ,token : Optional[str] = None ,**_a : Any ,):
'''simple docstring'''
super().__init__(self ,**_a )
self.repo_info = repo_info
self.token = token
self.dir_cache = None
def _get_dirs( self : List[Any] ):
'''simple docstring'''
if self.dir_cache is None:
self.dir_cache = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
self.dir_cache[hf_file.rfilename] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(_a ): {"""name""": str(_a ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _open( self : List[str] ,path : str ,mode : str = "rb" ,**_a : Dict ,):
'''simple docstring'''
if not isinstance(self.repo_info ,DatasetInfo ):
raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}' )
url = hf_hub_url(self.repo_info.id ,path ,revision=self.repo_info.sha )
return fsspec.open(
url ,mode=mode ,headers=get_authentication_headers_for_url(url ,use_auth_token=self.token ) ,client_kwargs={"""trust_env""": True} ,).open()
def info( self : Dict ,path : Tuple ,**_a : Any ):
'''simple docstring'''
self._get_dirs()
path = self._strip_protocol(path )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_a )
def ls( self : Union[str, Any] ,path : List[Any] ,detail : str=False ,**_a : str ):
'''simple docstring'''
self._get_dirs()
path = PurePosixPath(path.strip("""/""" ) )
paths = {}
for p, f in self.dir_cache.items():
p = PurePosixPath(p.strip("""/""" ) )
root = p.parent
if root == path:
paths[str(p )] = f
out = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
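# Minimal usage sketch for the legacy filesystem above, using a stand-in
# repo_info object instead of a real huggingface_hub DatasetInfo (an assumption
# made purely so the listing logic can run offline; _open still needs a real
# DatasetInfo and network access).
from types import SimpleNamespace

stub_repo_info = SimpleNamespace(
id="""user/dataset""" ,sha="""main""" ,siblings=[SimpleNamespace(rfilename="""data/train.csv""" )] ,)
fs = __lowerCAmelCase(repo_info=stub_repo_info )
assert fs.ls("""data""" ) == ["""data/train.csv"""]
assert fs.info("""data/train.csv""" )["""type"""] == """file"""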
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( task : Optional[Any] , reset_position_index_per_cell : Any , tf_checkpoint_path : Union[str, Any] , tapas_config_file : Tuple , pytorch_dump_path : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
config = TapasConfig.from_json_file(tapas_config_file)
# set absolute/relative position embeddings parameter
config.reset_position_index_per_cell = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
model = TapasForQuestionAnswering(config=config)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
model = TapasForQuestionAnswering(config=config)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
model = TapasForQuestionAnswering(config=config)
elif task == "TABFACT":
model = TapasForSequenceClassification(config=config)
elif task == "MLM":
model = TapasForMaskedLM(config=config)
elif task == "INTERMEDIATE_PRETRAINING":
model = TapasModel(config=config)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(model , config , tf_checkpoint_path)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(pytorch_dump_path)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(pytorch_dump_path)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
'''simple docstring'''
destination_vertex: int
weight: int
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,size : int ):
'''simple docstring'''
self._graph : list[list[Edge]] = [[] for _ in range(size )]
self._size = size
def __getitem__( self : int ,vertex : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def size( self : str ):
'''simple docstring'''
return self._size
def add_edge( self : str ,from_vertex : int ,to_vertex : int ,weight : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(to_vertex ,weight ) )
def get_shortest_path( self : Dict ,start_vertex : int ,finish_vertex : int ):
'''simple docstring'''
queue = deque([start_vertex] )
distances : list[int | None] = [None] * self.size
distances[start_vertex] = 0
while queue:
current_vertex = queue.popleft()
current_distance = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
new_distance = current_distance + edge.weight
dest_vertex_distance = distances[edge.destination_vertex]
if (
isinstance(dest_vertex_distance ,int )
and new_distance >= dest_vertex_distance
):
continue
distances[edge.destination_vertex] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
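# Worked example for the 0-1 BFS graph above (method names add_edge and
# get_shortest_path as restored above): weight-0 edges go to the front of the
# deque and weight-1 edges to the back, so vertices settle in nondecreasing
# distance order without a priority queue.
graph = __lowerCAmelCase(4 )
graph.add_edge(0 ,1 ,0 )
graph.add_edge(0 ,2 ,1 )
graph.add_edge(2 ,3 ,0 )
assert graph.get_shortest_path(0 ,3 ) == 1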
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( DiffusionPipeline ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,vqvae : AutoencoderKL ,unet : UNet2DConditionModel ,mel : Mel ,scheduler : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=unet ,scheduler=scheduler ,mel=mel ,vqvae=vqvae )
def get_default_steps( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,DDIMScheduler ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,batch_size : int = 1 ,audio_file : str = None ,raw_audio : np.ndarray = None ,slice : int = 0 ,start_step : int = 0 ,steps : int = None ,generator : torch.Generator = None ,mask_start_secs : float = 0 ,mask_end_secs : float = 0 ,step_generator : torch.Generator = None ,eta : float = 0 ,noise : torch.Tensor = None ,encoding : torch.Tensor = None ,return_dict : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def encode( self : Union[str, Any] ,images : List[Image.Image] ,steps : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def slerp( x0 : torch.Tensor ,x1 : torch.Tensor ,alpha : float ):
'''simple docstring'''
theta = acos(torch.dot(torch.flatten(x0 ) ,torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
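# Hedged usage sketch: the class above mirrors diffusers' audio diffusion
# pipeline. The checkpoint id is an assumption (a community audio-diffusion
# model); loading it downloads weights, and with return_dict=False the
# __call__ above returns spectrogram images plus (sample_rate, audios).
import torch as _torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
images, (sample_rate, audios) = pipe(
    batch_size=1, generator=_torch.Generator().manual_seed(42), return_dict=False
)
images[0].save("spectrogram.png")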
| 665 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = field(
default="""cifar10""" , metadata={"""help""": """Name of a dataset from the datasets package"""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """The column name of the images in the files."""} )
a_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """A folder containing the training data."""} )
a_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """A folder containing the validation data."""} )
a_ = field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : List[Any] = {}
if self.train_dir is not None:
A_ : Tuple = self.train_dir
if self.validation_dir is not None:
A_ : Dict = self.validation_dir
A_ : Union[str, Any] = data_files if data_files else None
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
a_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
a_ = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Name or path of preprocessor config."""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
a_ = field(
default=0.75 , metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
a_ = field(
default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = field(
default=1E-3 , metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def collate_fn ( examples : Dict):
pixel_values = torch.stack([example["""pixel_values"""] for example in examples])
return {"pixel_values": pixel_values}
def main ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A_ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(""".json"""):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A_ , A_ , A_ : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
A_ , A_ , A_ : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , lowerCamelCase , lowerCamelCase)
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(lowerCamelCase)
transformers.utils.logging.set_verbosity(lowerCamelCase)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
logger.info(F'Training/evaluation parameters {training_args}')
# Detecting last checkpoint.
A_ : int = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
A_ : Union[str, Any] = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""")
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""")
# Initialize our dataset.
A_ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
A_ : Union[str, Any] = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCamelCase) and data_args.train_val_split > 0.0:
A_ : Optional[int] = ds["""train"""].train_test_split(data_args.train_val_split)
A_ : List[str] = split["""train"""]
A_ : Optional[int] = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A_ : Optional[int] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
A_ : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **lowerCamelCase)
elif model_args.model_name_or_path:
A_ : Dict = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **lowerCamelCase)
else:
A_ : Dict = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""")
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}')
config.update_from_string(model_args.config_overrides)
logger.info(F'New config: {config}')
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
})
# create image processor
if model_args.image_processor_name:
A_ : Optional[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCamelCase)
elif model_args.model_name_or_path:
A_ : int = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCamelCase)
else:
A_ : int = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
A_ : List[Any] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path) , config=lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""")
A_ : str = ViTMAEForPreTraining(lowerCamelCase)
if training_args.do_train:
A_ : int = ds["""train"""].column_names
else:
A_ : Tuple = ds["""validation"""].column_names
if data_args.image_column_name is not None:
A_ : Union[str, Any] = data_args.image_column_name
elif "image" in column_names:
A_ : int = """image"""
elif "img" in column_names:
A_ : Dict = """img"""
else:
A_ : Any = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
A_ : str = image_processor.size["""shortest_edge"""]
else:
A_ : List[str] = (image_processor.size["""height"""], image_processor.size["""width"""])
transforms = Compose(
[
Lambda(lambda img: img.convert("""RGB""") if img.mode != "RGB" else img),
RandomResizedCrop(lowerCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std),
])
def preprocess_images(examples : int):
examples["""pixel_values"""] = [transforms(image) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""")
if data_args.max_train_samples is not None:
A_ : Any = ds["""train"""].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
# Set the training transforms
ds["train"].set_transform(lowerCamelCase)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""")
if data_args.max_eval_samples is not None:
A_ : Any = (
ds["""validation"""].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
ds["validation"].set_transform(lowerCamelCase)
# Compute absolute learning rate
A_ : int = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
A_ : List[Any] = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
A_ : Any = Trainer(
model=lowerCamelCase , args=lowerCamelCase , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowerCamelCase , data_collator=collate_fn , )
# Training
if training_args.do_train:
A_ : Optional[Any] = None
if training_args.resume_from_checkpoint is not None:
A_ : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A_ : Any = last_checkpoint
A_ : List[str] = trainer.train(resume_from_checkpoint=lowerCamelCase)
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics)
trainer.save_metrics("""train""" , train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
A_ : Union[str, Any] = trainer.evaluate()
trainer.log_metrics("""eval""" , lowerCamelCase)
trainer.save_metrics("""eval""" , lowerCamelCase)
# Write model card and (optionally) push to hub
A_ : Union[str, Any] = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCamelCase)
else:
trainer.create_model_card(**lowerCamelCase)
def _mp_fn ( index : int):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
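# Example launch for the MAE pretraining script above. Paths and
# hyper-parameters are illustrative placeholders, not values mandated by the
# script; the flags themselves come from the dataclass fields above:
#
# python run_mae.py \
#     --dataset_name cifar10 \
#     --output_dir ./vit-mae-demo \
#     --do_train --do_eval \
#     --base_learning_rate 1.5e-4 \
#     --mask_ratio 0.75 \
#     --norm_pix_loss \
#     --remove_unused_columns False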
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders ( accelerator : Accelerator , batch_size : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def training_function ( config : Any , args : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def main ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
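# Example launches for the training script above (the script file name is a
# placeholder assumption):
#
# single CPU/GPU: python nlp_example.py --mixed_precision fp16
# multi-GPU:      accelerate launch nlp_example.py --mixed_precision fp16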
| 665 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__magic_name__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class __lowerCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = BertTokenizer
def __init__( self : Optional[int] ,vocab_file : Tuple=None ,tokenizer_file : Tuple=None ,do_lower_case : Union[str, Any]=True ,unk_token : List[Any]="[UNK]" ,sep_token : Any="[SEP]" ,pad_token : Union[str, Any]="[PAD]" ,cls_token : List[str]="[CLS]" ,mask_token : Dict="[MASK]" ,tokenize_chinese_chars : Tuple=True ,strip_accents : Dict=None ,**kwargs : Dict ,):
'''simple docstring'''
super().__init__(
vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,do_lower_case ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,strip_accents ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,tokenize_chinese_chars ) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers ,normalizer_state.pop("""type""" ) )
normalizer_state["""lowercase"""] = do_lower_case
normalizer_state["""strip_accents"""] = strip_accents
normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens( self : str ,token_ids_0 : Tuple ,token_ids_1 : Optional[Any]=None ):
'''simple docstring'''
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences( self : Optional[Any] ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
def save_vocabulary( self : Tuple ,save_directory : str ,filename_prefix : Optional[str] = None ):
'''simple docstring'''
files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
return tuple(files )
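# Hedged usage sketch mirroring BertTokenizerFast above, with a checkpoint
# from the pretrained maps (from_pretrained downloads it); token_type_ids
# follow the 0/1 segment scheme of create_token_type_ids_from_sequences.
from transformers import BertTokenizerFast

bert_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
pair = bert_tokenizer("A first sentence.", "A second one.")
assert pair["token_type_ids"][0] == 0   # first segment, including [CLS]
assert pair["token_type_ids"][-1] == 1  # second segment, including final [SEP]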
| 665 |
'''simple docstring'''
import functools
def mincost_tickets ( days : list[int] , costs : list[int]):
# Validation
if not isinstance(days , list) or not all(isinstance(day , int) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(costs) != 3 or not all(isinstance(cost , int) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(days) == 0:
return 0
if min(days) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(days) >= 366:
raise ValueError("""All days elements should be less than 366""")
days_set = set(days)
@functools.cache
def dynamic_programming(index : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
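# Worked examples for mincost_tickets above (LeetCode 983 convention:
# costs = [1-day, 7-day, 30-day] pass prices):
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
assert mincost_tickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]) == 17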
| 665 | 1 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared ( vector : ndarray):
return np.dot(vector , vector)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : int ,*,
regularization : float = np.inf ,kernel : str = "linear" ,gamma : float = 0.0 ,):
'''simple docstring'''
self.regularization = regularization
self.gamma = gamma
if kernel == "linear":
self.kernel = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError("""rbf kernel requires gamma""" )
if not isinstance(self.gamma ,(float, int) ):
raise ValueError("""gamma must be float or int""" )
if not self.gamma > 0:
raise ValueError("""gamma must be > 0""" )
self.kernel = self.__rbf
# in the future, there could be a default value like in sklearn
# sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
msg = f'Unknown kernel: {kernel}'
raise ValueError(msg )
def __linear( self : Tuple ,vector1 : ndarray ,vector2 : ndarray ):
'''simple docstring'''
return np.dot(vector1 ,vector2 )
def __rbf( self : Tuple ,vector1 : ndarray ,vector2 : ndarray ):
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vector1 - vector2 )) )
def fit( self : Any ,observations : list[ndarray] ,classes : ndarray ):
'''simple docstring'''
self.observations = observations
self.classes = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
(n , ) = np.shape(classes )
def to_minimize(candidate : ndarray ) -> float:
s = 0
(n , ) = np.shape(candidate )
for i in range(n ):
for j in range(n ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] ,observations[j] )
)
return 1 / 2 * s - sum(candidate )
ly_contraint = LinearConstraint(classes ,0 ,0 )
l_bounds = Bounds(0 ,self.regularization )
l_star = minimize(
to_minimize ,np.ones(n ) ,bounds=l_bounds ,constraints=[ly_contraint] ).x
self.optimum = l_star
# calculating mean offset of separation plane to points
s = 0
for i in range(n ):
for j in range(n ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] ,observations[j] )
self.offset = s / n
def predict( self : int ,observation : ndarray ):
'''simple docstring'''
s = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] ,observation )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
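# Tiny linearly separable check for the SVC above (class name left as the
# obfuscated __lowerCAmelCase; fit/predict as restored above): two opposite
# points with a linear kernel and hard margin, so the learned decision
# function should recover both labels.
svc = __lowerCAmelCase(kernel="""linear""" )
svc.fit([np.asarray([0.0, 1.0] ), np.asarray([0.0, -1.0] )] ,np.asarray([1, -1] ) )
assert svc.predict(np.asarray([0.0, 2.0] ) ) == 1
assert svc.predict(np.asarray([0.0, -2.0] ) ) == -1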
| 665 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method ( coefficient_matrix : NDArray[float64] , constant_matrix : NDArray[float64] , init_val : list[int] , iterations : int , ):
rows1 , cols1 = coefficient_matrix.shape
rows2 , cols2 = constant_matrix.shape
if rows1 != cols1:
msg = F'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
raise ValueError(msg)
if cols2 != 1:
msg = F'Constant matrix must be nx1 but received {rows2}x{cols2}'
raise ValueError(msg)
if rows1 != rows2:
msg = (
"""Coefficient and constant matrices dimensions must be nxn and nx1 but """
F'received {rows1}x{cols1} and {rows2}x{cols2}'
)
raise ValueError(msg)
if len(init_val) != rows1:
msg = (
"""Number of initial values must be equal to number of rows in coefficient """
F'matrix but received {len(init_val)} and {rows1}'
)
raise ValueError(msg)
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""")
table : NDArray[float64] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
rows , cols = table.shape
strictly_diagonally_dominant(table)
# Iterates the whole matrix for given number of times
for _ in range(iterations):
new_val = []
for row in range(rows):
temp = 0
for col in range(cols):
if col == row:
denom = table[row][col]
elif col == cols - 1:
val = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
temp = (temp + val) / denom
new_val.append(temp)
init_val = new_val
return [float(i) for i in new_val]
def strictly_diagonally_dominant ( table : NDArray[float64]):
rows , cols = table.shape
is_diagonally_dominant = True
for i in range(0 , rows):
total = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
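# Worked example for jacobi_iteration_method above on a strictly diagonally
# dominant 3x3 system, checked against numpy's direct solver:
coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
approx = jacobi_iteration_method(coefficient, constant, [1, 1, 1], 100)
assert np.allclose(approx, np.linalg.solve(coefficient, constant).ravel(), atol=1e-8)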
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_longt5': ['LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LongT5Config', 'LongT5OnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_longt5'] = [
'LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongT5EncoderModel',
'LongT5ForConditionalGeneration',
'LongT5Model',
'LongT5PreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_longt5'] = [
'FlaxLongT5ForConditionalGeneration',
'FlaxLongT5Model',
'FlaxLongT5PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longt5 import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongT5EncoderModel,
LongT5ForConditionalGeneration,
LongT5Model,
LongT5PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longt5 import (
FlaxLongT5ForConditionalGeneration,
FlaxLongT5Model,
FlaxLongT5PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
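# Hedged usage sketch for the lazy module above: attribute access on the
# package resolves through _LazyModule to the names declared in
# _import_structure. LongT5Config is the canonical transformers class.
from transformers import LongT5Config

config = LongT5Config()
assert config.model_type == "longt5"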
| 665 |
'''simple docstring'''
def abbr ( a : str , b : str):
n = len(a)
m = len(b)
dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
dp[0][0] = True
for i in range(n):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
dp[i + 1][j + 1] = True
if a[i].islower():
dp[i + 1][j] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
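# Worked examples for abbr above (HackerRank "Abbreviation" semantics: a can
# become b by upper-casing some of its lowercase letters and deleting the
# remaining lowercase ones):
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False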
| 665 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
_import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge :
'''simple docstring'''
destination_vertex: int
weight: int
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,size : int ):
'''simple docstring'''
self._graph : list[list[Edge]] = [[] for _ in range(size )]
self._size = size
def __getitem__( self : int ,vertex : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def size( self : str ):
'''simple docstring'''
return self._size
def add_edge( self : str ,from_vertex : int ,to_vertex : int ,weight : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(to_vertex ,weight ) )
def get_shortest_path( self : Dict ,start_vertex : int ,finish_vertex : int ):
'''simple docstring'''
queue = deque([start_vertex] )
distances : list[int | None] = [None] * self.size
distances[start_vertex] = 0
while queue:
current_vertex = queue.popleft()
current_distance = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
new_distance = current_distance + edge.weight
dest_vertex_distance = distances[edge.destination_vertex]
if (
isinstance(dest_vertex_distance ,int )
and new_distance >= dest_vertex_distance
):
continue
distances[edge.destination_vertex] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 | 1 |
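Usage sketch for the 0-1 BFS class above (editor's illustration; names follow the reconstructed snippet):

graph = AdjacencyList(4)
graph.add_edge(0, 1, 0)   # free edge: its endpoint is explored first (appendleft)
graph.add_edge(1, 2, 1)
graph.add_edge(0, 3, 1)
graph.add_edge(3, 2, 0)
assert graph.get_shortest_path(0, 2) == 1  # both routes 0->1->2 and 0->3->2 cost 1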
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 |
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    # Project Euler 94: sum the perimeters of almost-equilateral triangles
    # (sides a, a, a +/- 1) with integral area and perimeter <= max_perimeter.
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 665 | 1 |
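Tracing the recurrence above by hand: the generated perimeters are 16, 50, 196, 722, ..., matching the almost-equilateral Heronian triangles (5,5,6), (17,17,16), (65,65,66), (241,241,240). A hand-checkable case for the reconstructed `solution`:

assert solution(50) == 16 + 50  # only the first two triangles have perimeter <= 50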
'''simple docstring'''
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be turned into abbreviation `b` by
    uppercasing some of its lowercase letters and deleting the rest.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")
    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 665 | 1 |
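For reference, the subcommands registered above map onto the standard accelerate CLI surface (illustrative shell usage; train.py is a placeholder script name):

# accelerate config                     # interactively build the launch config
# accelerate env                        # print environment info for bug reports
# accelerate launch train.py --lr 1e-4  # run a script under the saved config
# accelerate test                       # sanity-check the current configuration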
'''simple docstring'''
import argparse
CUSTOM_JS_FILE = 'docs/source/_static/js/custom.js'


def update_custom_js(version):
    """Pin `stableVersion` and extend `versionMapping` in the docs' custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    update_custom_js(args.version)
| 665 |
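The string matches above pin down the shape of custom.js: a `const stableVersion = "..."` line, then a `const versionMapping = { ... }` literal closed by a line starting with `}`. A hypothetical before/after for `--version 4.26.0` (reconstructed from the matches, not from the real file):

// before
const stableVersion = "v4.25.1"
const versionMapping = {
    "v4.25.1": "v4.25.1",
}

// after: stableVersion is rewritten, and the new entry is appended to the
// line just before the closing brace
const stableVersion = "v4.26.0"
const versionMapping = {
    "v4.25.1": "v4.25.1",
    "v4.26.0": "v4.26.0",
}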
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_altclip'] = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    # The original attribute name was lost to obfuscation; in diffusers'
    # pipeline tests this boolean flag is typically `test_xformers_attention`.
    test_xformers_attention = False
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return 32
@property
def _a ( self : List[str] ):
'''simple docstring'''
return 32
@property
def _a ( self : int ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 8
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Optional[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size ,image_size=64 ,projection_dim=self.text_embedder_hidden_size ,intermediate_size=37 ,num_attention_heads=4 ,num_channels=3 ,num_hidden_layers=5 ,patch_size=1 ,)
A_ : Tuple = CLIPVisionModel(_a )
return model
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Dict = CLIPImageProcessor(
crop_size=224 ,do_center_crop=_a ,do_normalize=_a ,do_resize=_a ,image_mean=[0.48145466, 0.4578275, 0.40821073] ,image_std=[0.26862954, 0.26130258, 0.27577711] ,resample=3 ,size=224 ,)
return image_processor
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Union[str, Any] = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
A_ : List[Any] = PriorTransformer(**_a )
return model
@property
def _a ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Dict = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
A_ : Optional[Any] = ShapERenderer(**_a )
return model
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_prior
A_ : Optional[Any] = self.dummy_image_encoder
A_ : int = self.dummy_image_processor
A_ : List[Any] = self.dummy_renderer
A_ : Tuple = HeunDiscreteScheduler(
beta_schedule="""exp""" ,num_train_timesteps=1024 ,prediction_type="""sample""" ,use_karras_sigmas=_a ,clip_sample=_a ,clip_sample_range=1.0 ,)
A_ : List[Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _a ( self : Any ,_a : List[Any] ,_a : int=0 ):
'''simple docstring'''
A_ : Any = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Union[str, Any] = torch.manual_seed(_a )
else:
A_ : int = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _a ( self : Any ):
'''simple docstring'''
A_ : Any = """cpu"""
A_ : Union[str, Any] = self.get_dummy_components()
A_ : Any = self.pipeline_class(**_a )
A_ : Optional[Any] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Dict = pipe(**self.get_dummy_inputs(_a ) )
A_ : Dict = output.images[0]
A_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A_ : Any = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = torch_device == """cpu"""
A_ : str = True
self._test_inference_batch_single_identical(
batch_size=2 ,test_max_difference=_a ,relax_max_difference=_a ,)
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = self.get_dummy_components()
A_ : int = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Optional[int] = 1
A_ : Dict = 2
A_ : List[str] = self.get_dummy_inputs(_a )
for key in inputs.keys():
if key in self.batch_params:
A_ : int = batch_size * [inputs[key]]
A_ : Optional[Any] = pipe(**_a ,num_images_per_prompt=_a )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
A_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_img2img_out.npy""" )
A_ : int = ShapEImgaImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
A_ : List[str] = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : List[Any] = torch.Generator(device=_a ).manual_seed(0 )
A_ : Dict = pipe(
_a ,generator=_a ,guidance_scale=3.0 ,num_inference_steps=64 ,frame_size=64 ,output_type="""np""" ,).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_a ,_a )
| 665 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_yolos'] = ['YolosFeatureExtractor']
_import_structure['image_processing_yolos'] = ['YolosImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_yolos'] = [
'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST',
'YolosForObjectDetection',
'YolosModel',
'YolosPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        # project the time embedding and broadcast it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
| 665 |
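A shape-level usage sketch for the resnet block above (editor's illustration; assumes the reconstructed class names):

import jax.numpy as jnp
from jax import random

block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
hidden = jnp.zeros((1, 8, 8, 32))  # NHWC layout, as the resize in FlaxUpsample2D assumes
temb = jnp.zeros((1, 128))         # projected to out_channels inside the block
variables = block.init(random.PRNGKey(0), hidden, temb)
out = block.apply(variables, hidden, temb)
assert out.shape == (1, 8, 8, 64)  # spatial size preserved, channels mapped via the 1x1 shortcut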
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_deberta'] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_deberta'] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 665 | 1 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
| 665 |
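Two properties of the merge step above worth noting: the `<=` comparison pops from the left run on ties, which makes the sort stable, and each recursion level touches every element once, giving the usual O(n log n) time. Illustrative usage:

assert merge_sort([5, 2, 9, 1, 5, 6]) == [1, 2, 5, 5, 6, 9]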
'''simple docstring'''
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: repeatedly emit vertices whose indegree drops to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 665 | 1 |
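Kahn's algorithm doubles as a cycle detector: if the queue drains before every vertex is emitted, some indegree never reached zero. Illustrative checks against the reconstructed `topological_sort`:

topological_sort({0: [1], 1: [0]})  # prints "Cycle exists"
topological_sort({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []})  # prints [0, 1, 2, 3, 4, 5]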
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 665 |
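A short usage sketch for the reconstructed config class above (illustrative; mirrors how transformers configs are normally instantiated):

config = RetriBertConfig(hidden_size=256, num_hidden_layers=4)
assert config.model_type == "retribert"
assert config.projection_dim == 128  # untouched default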
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Any = batch_size
A_ : Tuple = seq_length
A_ : List[str] = is_training
A_ : Tuple = use_input_mask
A_ : Dict = use_token_type_ids
A_ : List[Any] = use_labels
A_ : Union[str, Any] = vocab_size
A_ : Any = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : str = intermediate_size
A_ : Tuple = hidden_act
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : List[str] = max_position_embeddings
A_ : int = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Any = initializer_range
A_ : List[Any] = num_labels
A_ : Optional[Any] = num_choices
A_ : List[Any] = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Dict = None
if self.use_token_type_ids:
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : str = None
A_ : Any = None
A_ : str = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ):
'''simple docstring'''
A_ : Any = LlamaModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,):
'''simple docstring'''
A_ : List[str] = True
A_ : Union[str, Any] = LlamaModel(_a )
model.to(_a )
model.eval()
A_ : Tuple = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : List[Any] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,)
A_ : int = model(_a ,attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : List[Any] = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = True
A_ : Any = True
A_ : Tuple = LlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
A_ : Optional[int] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,)
A_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 )
A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 )
A_ : List[str] = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
A_ : Any = model(
_a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0]
# select random slice
A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
A_ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = LlamaModelTester(self )
A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : Dict = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
A_ : List[str] = 3
A_ : Any = input_dict["""input_ids"""]
A_ : Union[str, Any] = input_ids.ne(1 ).to(_a )
A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : int = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Dict ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : str = 3
A_ : Union[str, Any] = """single_label_classification"""
A_ : Union[str, Any] = input_dict["""input_ids"""]
A_ : List[Any] = input_ids.ne(1 ).to(_a )
A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
A_ : List[Any] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Dict = 3
A_ : Dict = """multi_label_classification"""
A_ : Any = input_dict["""input_ids"""]
A_ : Optional[Any] = input_ids.ne(1 ).to(_a )
A_ : List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
A_ : Optional[int] = LlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,labels=_a )
self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _a ( self : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size )
A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : int = LlamaModel(_a )
original_model.to(_a )
original_model.eval()
A_ : Tuple = original_model(_a ).last_hidden_state
A_ : Union[str, Any] = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0}
A_ : int = LlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
A_ : List[Any] = scaled_model(_a ).last_hidden_state
A_ : Any = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" )
A_ : str = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" )
@slow
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" )
A_ : int = model(torch.tensor(_a ) )
# Expected mean on dim = -1
A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
@unittest.skip(
"""Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338]
A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" )
A_ : Tuple = model(torch.tensor(_a ) )
A_ : Dict = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 )
# fmt: off
A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 )
@unittest.skip("""Model is curently gated""" )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
A_ : List[str] = """Simply put, the theory of relativity states that """
A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" )
A_ : List[str] = LlamaForCausalLM.from_pretrained(
"""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a )
# greedy generation outputs
A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a )
A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a )
self.assertEqual(_a ,_a )
| 665 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Dict ):
'''simple docstring'''
A_ : Union[str, Any] = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split()
A_ : List[Any] = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {
"""unk_token""": """<unk>""",
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
}
A_ : Optional[int] = {
"""feature_size""": 1,
"""padding_value""": 0.0,
"""sampling_rate""": 16000,
"""return_attention_mask""": False,
"""do_normalize""": True,
}
A_ : List[str] = tempfile.mkdtemp()
A_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(self.tmpdirname ,_a )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
with open(self.feature_extraction_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_a ) + """\n""" )
# load decoder from hub
A_ : int = """hf-internal-testing/ngram-beam-search-decoder"""
def _a ( self : Any ,**_a : Union[str, Any] ):
'''simple docstring'''
A_ : str = self.add_kwargs_tokens_map.copy()
kwargs.update(_a )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_a )
def _a ( self : Optional[Any] ,**_a : List[str] ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_a )
def _a ( self : Union[str, Any] ,**_a : Dict ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_a )
def _a ( self : Tuple ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _a ( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Optional[int] = self.get_feature_extractor()
A_ : str = self.get_decoder()
A_ : Optional[int] = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
processor.save_pretrained(self.tmpdirname )
A_ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_a )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_a )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_a )
def _a ( self : str ):
'''simple docstring'''
A_ : int = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
A_ : Tuple = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["""xx"""] )
with self.assertRaisesRegex(_a ,"""include""" ):
WavaVecaProcessorWithLM(
tokenizer=_a ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[Any] = self.get_feature_extractor()
A_ : Dict = self.get_tokenizer()
A_ : List[Any] = self.get_decoder()
A_ : Tuple = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
A_ : int = floats_list((3, 1000) )
A_ : Tuple = feature_extractor(_a ,return_tensors="""np""" )
A_ : List[str] = processor(_a ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _a ( self : int ):
'''simple docstring'''
A_ : Any = self.get_feature_extractor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Dict = self.get_decoder()
A_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
A_ : Dict = """This is a test string"""
A_ : int = processor(text=_a )
A_ : List[Any] = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _a ( self : Dict ,_a : Dict=(2, 10, 16) ,_a : Union[str, Any]=77 ):
'''simple docstring'''
np.random.seed(_a )
return np.random.rand(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.get_feature_extractor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_decoder()
A_ : Dict = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
A_ : Dict = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
A_ : Optional[Any] = processor.decode(_a )
A_ : Optional[Any] = decoder.decode_beams(_a )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual("""</s> <s> </s>""" ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ["""fork"""], ["""spawn"""]] )
def _a ( self : Union[str, Any] ,_a : List[str] ):
'''simple docstring'''
A_ : Dict = self.get_feature_extractor()
A_ : List[str] = self.get_tokenizer()
A_ : Any = self.get_decoder()
A_ : Any = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
A_ : Union[str, Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
A_ : Tuple = processor.batch_decode(_a )
else:
with get_context(_a ).Pool() as pool:
A_ : Tuple = processor.batch_decode(_a ,_a )
A_ : str = list(_a )
with get_context("""fork""" ).Pool() as p:
A_ : Optional[int] = decoder.decode_beams_batch(_a ,_a )
texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_a ,decoded_processor.text )
self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] ,decoded_processor.text )
self.assertListEqual(_a ,decoded_processor.logit_score )
self.assertListEqual(_a ,decoded_processor.lm_score )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = self.get_feature_extractor()
A_ : Dict = self.get_tokenizer()
A_ : Any = self.get_decoder()
A_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
A_ : Tuple = self._get_dummy_logits()
A_ : Optional[Any] = 15
A_ : List[str] = -20.0
A_ : Optional[Any] = -4.0
A_ : Optional[Any] = processor.batch_decode(
_a ,beam_width=_a ,beam_prune_logp=_a ,token_min_logp=_a ,)
A_ : Union[str, Any] = decoded_processor_out.text
A_ : Dict = list(_a )
with get_context("""fork""" ).Pool() as pool:
A_ : Tuple = decoder.decode_beams_batch(
_a ,_a ,beam_width=_a ,beam_prune_logp=_a ,token_min_logp=_a ,)
A_ : Optional[Any] = [d[0][0] for d in decoded_decoder_out]
A_ : Union[str, Any] = [d[0][2] for d in decoded_decoder_out]
A_ : Union[str, Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_a ,_a )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] ,_a )
self.assertTrue(np.array_equal(_a ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] ,_a ,atol=1e-3 ) )
self.assertTrue(np.array_equal(_a ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] ,_a ,atol=1e-3 ) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = self.get_feature_extractor()
A_ : List[Any] = self.get_tokenizer()
A_ : Optional[Any] = self.get_decoder()
A_ : Dict = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
A_ : Optional[int] = self._get_dummy_logits()
A_ : Tuple = 2.0
A_ : int = 5.0
A_ : int = -20.0
A_ : str = True
A_ : int = processor.batch_decode(
_a ,alpha=_a ,beta=_a ,unk_score_offset=_a ,lm_score_boundary=_a ,)
A_ : Optional[int] = decoded_processor_out.text
A_ : Optional[Any] = list(_a )
decoder.reset_params(
alpha=_a ,beta=_a ,unk_score_offset=_a ,lm_score_boundary=_a ,)
with get_context("""fork""" ).Pool() as pool:
A_ : Optional[Any] = decoder.decode_beams_batch(
_a ,_a ,)
A_ : Tuple = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_a ,_a )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] ,_a )
A_ : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-20.0 )
self.assertEqual(lm_model.score_boundary ,_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[str] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A_ : Optional[int] = processor.decoder.model_container[processor.decoder._model_key]
A_ : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A_ : Tuple = os.listdir(_a )
A_ : Any = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_a ,_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" )
A_ : int = WavaVecaProcessorWithLM.from_pretrained(_a )
A_ : Any = processor.decoder.model_container[processor.decoder._model_key]
A_ : str = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A_ : Union[str, Any] = os.listdir(_a )
A_ : List[Any] = os.listdir(_a )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_a ,_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A_ : Dict = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A_ : Dict = floats_list((3, 1000) )
A_ : List[Any] = processor_wavaveca(_a ,return_tensors="""np""" )
A_ : Optional[int] = processor_auto(_a ,return_tensors="""np""" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
A_ : Optional[int] = self._get_dummy_logits()
A_ : str = processor_wavaveca.batch_decode(_a )
A_ : Union[str, Any] = processor_auto.batch_decode(_a )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def _a ( self : Any ):
'''simple docstring'''
A_ : int = self.get_feature_extractor()
A_ : Tuple = self.get_tokenizer()
A_ : List[Any] = self.get_decoder()
A_ : int = WavaVecaProcessorWithLM(tokenizer=_a ,feature_extractor=_a ,decoder=_a )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg="""`processor` and `feature_extractor` model input names do not match""" ,)
@staticmethod
def _a ( _a : Optional[int] ,_a : Optional[int] ):
'''simple docstring'''
A_ : str = [d[key] for d in offsets]
return retrieved_list
def _a ( self : str ):
'''simple docstring'''
A_ : List[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A_ : List[Any] = self._get_dummy_logits()[0]
A_ : Tuple = processor.decode(_a ,output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_a ,_a ) )
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] ,"""end_offset""" ) ,[1, 3, 5] )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A_ : List[Any] = self._get_dummy_logits()
A_ : int = processor.batch_decode(_a ,output_word_offsets=_a )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""word_offsets""" in outputs )
self.assertTrue(isinstance(_a ,_a ) )
self.assertListEqual(
[""" """.join(self.get_from_offsets(_a ,"""word""" ) ) for o in outputs["""word_offsets"""]] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""word""" ) ,["""<s>""", """<s>""", """</s>"""] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""start_offset""" ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] ,"""end_offset""" ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def _a ( self : List[Any] ):
'''simple docstring'''
import torch
A_ : Tuple = load_dataset("""common_voice""" ,"""en""" ,split="""train""" ,streaming=_a )
A_ : Union[str, Any] = ds.cast_column("""audio""" ,datasets.Audio(sampling_rate=16000 ) )
A_ : List[str] = iter(_a )
A_ : Tuple = next(_a )
A_ : Union[str, Any] = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
A_ : Tuple = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
A_ : Optional[int] = processor(sample["""audio"""]["""array"""] ,return_tensors="""pt""" ).input_values
with torch.no_grad():
A_ : List[Any] = model(_a ).logits.cpu().numpy()
A_ : str = processor.decode(logits[0] ,output_word_offsets=_a )
A_ : Union[str, Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
A_ : int = [
{
"""start_time""": d["""start_offset"""] * time_offset,
"""end_time""": d["""end_offset"""] * time_offset,
"""word""": d["""word"""],
}
for d in output["""word_offsets"""]
]
A_ : Optional[Any] = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"""
# output words
self.assertEqual(""" """.join(self.get_from_offsets(_a ,"""word""" ) ) ,_a )
self.assertEqual(""" """.join(self.get_from_offsets(_a ,"""word""" ) ) ,output.text )
# output times
A_ : List[Any] = torch.tensor(self.get_from_offsets(_a ,"""start_time""" ) )
A_ : str = torch.tensor(self.get_from_offsets(_a ,"""end_time""" ) )
# fmt: off
A_ : Any = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
A_ : List[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_a ,_a ,atol=0.01 ) )
self.assertTrue(torch.allclose(_a ,_a ,atol=0.01 ) )
| 665 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
__magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
__magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
__magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n    predictions: list of predictions to score. Each prediction\n        is a string that contains natural language and LaTeX.\n    references: list of references, one for each prediction. Each\n        reference is a string that contains natural language\n        and LaTeX.\nReturns:\n    accuracy: accuracy after canonicalizing inputs\n        (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n    >>> metric = datasets.load_metric("competition_math")\n    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n    >>> print(results)\n    {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _a ( self : Optional[Any] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,)
def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = 0.0
for i, j in zip(_a ,_a ):
n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0
A_ : List[str] = n_correct / len(_a )
return {
"accuracy": accuracy,
}
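# Usage sketch (hedged: requires `math_equivalence` installed from
# git+https://github.com/hendrycks/math.git and the metric script to be
# resolvable by `datasets.load_metric`):
#
# metric = datasets.load_metric("competition_math")
# results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
# print(results)  # {'accuracy': 1.0}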
| 665 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 1000):
A_ : Tuple = 2**power
A_ : List[Any] = str(lowerCamelCase)
A_ : List[str] = list(lowerCamelCase)
A_ : int = 0
for i in list_num:
sum_of_num += int(lowerCamelCase)
return sum_of_num
if __name__ == "__main__":
__magic_name__ = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
__magic_name__ = solution(power)
print('Sum of the digits is: ', result)
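# Equivalent one-liner (illustrative sketch, same result as solution(power)):
# print(sum(int(digit) for digit in str(2**power)))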
| 665 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
# TODO: upload to AWS
__magic_name__ = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """retribert"""
def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : Dict = vocab_size
A_ : int = hidden_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[int] = initializer_range
A_ : Dict = layer_norm_eps
A_ : str = share_encoders
A_ : List[Any] = projection_dim
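# Usage sketch (illustrative; in the original source this class is
# RetriBertConfig): construct with defaults and override a single field, e.g.
# config = RetriBertConfig(projection_dim=256)
# assert config.model_type == "retribert"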
| 665 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Union[str, Any] = 0
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(_a ,_a )
def _a ( self : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Union[str, Any] = Path(_a ) / """preprocessor_config.json"""
A_ : Union[str, Any] = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_a ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(_a ,"""w""" ) )
A_ : int = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Dict = Path(_a ) / """preprocessor_config.json"""
A_ : int = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(_a ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(_a ,"""w""" ) )
A_ : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Any = CLIPConfig()
# Create a dummy config file with image_processor_type
A_ : str = Path(_a ) / """preprocessor_config.json"""
A_ : Dict = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_a ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(_a ,"""w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
A_ : List[Any] = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop("""image_processor_type""" )
A_ : Dict = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
A_ : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
A_ : Tuple = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a ,_a )
def _a ( self : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : List[Any] = Path(_a ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} ,open(_a ,"""w""" ) ,)
A_ : Tuple = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
_a ,"""clip-base is not a local folder and is not a valid model identifier""" ):
A_ : str = AutoImageProcessor.from_pretrained("""clip-base""" )
def _a ( self : List[str] ):
'''simple docstring'''
with self.assertRaisesRegex(
_a ,r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
A_ : Union[str, Any] = AutoImageProcessor.from_pretrained(_a ,revision="""aaaaaa""" )
def _a ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
_a ,"""hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" ,):
A_ : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _a ( self : List[Any] ):
'''simple docstring'''
with self.assertRaises(_a ):
A_ : str = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
A_ : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_a )
A_ : Union[str, Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
# Test that the image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
A_ : Union[str, Any] = AutoImageProcessor.from_pretrained(_a ,trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,"""NewImageProcessor""" )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
try:
AutoConfig.register("""custom""" ,_a )
AutoImageProcessor.register(_a ,_a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a ,_a )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : int = Path(_a ) / """preprocessor_config.json"""
A_ : int = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} ,open(_a ,"""w""" ) ,)
json.dump({"""model_type""": """clip"""} ,open(_a ,"""w""" ) )
A_ : str = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
A_ : Any = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a ,_a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _a ( self : Any ):
'''simple docstring'''
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = True
try:
AutoConfig.register("""custom""" ,_a )
AutoImageProcessor.register(_a ,_a )
# If trust_remote_code is not set, the default is to use the local class
A_ : Optional[int] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
A_ : Tuple = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is enabled, we load from the Hub
A_ : Any = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" ,trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ ,"""NewImageProcessor""" )
self.assertTrue(not hasattr(_a ,"""is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 665 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# The mask token behaves like a normal word, i.e. it includes the space before it
A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Optional[int] = vocab_file
A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = self.__dict__.copy()
A_ : Union[str, Any] = None
return state
def __setstate__( self : List[Any] ,_a : Any ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Tuple = {}
A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : int ,_a : Optional[int] ):
'''simple docstring'''
A_ : List[str] = self.sp_model.IdToPiece(_a )
return token
def _a ( self : Dict ,_a : int ):
'''simple docstring'''
A_ : int = []
A_ : Any = """"""
A_ : str = False
for token in tokens:
# make sure that special tokens are not decoded using the sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = True
A_ : Union[str, Any] = []
else:
current_sub_tokens.append(_a )
A_ : str = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,):
'''simple docstring'''
A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a )
A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
A_ : str = []
A_ : int = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
A_ : List[str] = []
sub_texts.append(_a )
else:
current_sub_text.append(_a )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_a ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) )
else:
A_ : Tuple = """""".join(_a )
A_ : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
A_ : Optional[Any] = self.clean_up_tokenization(_a )
return clean_text
else:
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : str = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : List[Any] = [self.cls_token_id]
A_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Tuple = [self.sep_token_id]
A_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
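# Worked example (sketch): for sequences A = [a1, a2] and B = [b1], the pair
# "[CLS] a1 a2 [SEP] b1 [SEP]" yields token type ids [0, 0, 0, 0, 1, 1].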
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
__magic_name__ = 1.60_21e-19 # units = C
def lowerCamelCase ( lowerCamelCase : float , lowerCamelCase : float , lowerCamelCase : float , ):
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("""You cannot supply more or less than 2 values""")
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""")
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""")
elif mobility < 0:
raise ValueError("""mobility cannot be negative""")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
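# Example call (sketch; the function above is mangled to `lowerCamelCase`):
# set exactly one argument to 0 and the function solves for it, e.g.
# lowerCamelCase(conductivity=25, electron_conc=0, mobility=100)
# returns ('electron_conc', 25 / (100 * 1.6021e-19)).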
| 665 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ):
'''simple docstring'''
A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a )
return generator, ["Something to write", "Something else"]
def _a ( self : str ,_a : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : Any = generator("""Something there""" )
self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] )
# These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
A_ : List[str] = generator(
["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a )
self.assertEqual(
_a ,[
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] ,)
with self.assertRaises(_a ):
generator(4 )
@require_torch
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ : Tuple = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
A_ : Optional[int] = 3
A_ : Tuple = generator(
"""Something there""" ,num_return_sequences=_a ,num_beams=_a ,)
A_ : Optional[Any] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a ,_a )
A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a )
self.assertEqual(
_a ,[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] ,)
A_ : Dict = generator.model.config.eos_token_id
A_ : Optional[int] = """<pad>"""
A_ : List[Any] = generator(
["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,)
self.assertEqual(
_a ,[
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] ,)
@require_tf
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" )
# do_sample=False necessary for reproducibility
A_ : Dict = generator("""Something there""" ,do_sample=_a )
self.assertEqual(_a ,[{"""generated_text""": """"""}] )
| 665 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__magic_name__ = None
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
__magic_name__ = '▁'
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = BigBirdTokenizer
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : Tuple ,_a : Dict=None ,_a : Optional[Any]=None ,_a : str="<unk>" ,_a : Any="<s>" ,_a : Optional[int]="</s>" ,_a : Optional[int]="<pad>" ,_a : List[str]="[SEP]" ,_a : List[str]="[MASK]" ,_a : int="[CLS]" ,**_a : Any ,):
'''simple docstring'''
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : List[str] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : List[str] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# The mask token behaves like a normal word, i.e. it includes the space before it
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
_a ,tokenizer_file=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,**_a ,)
A_ : List[Any] = vocab_file
A_ : Optional[int] = False if not self.vocab_file else True
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : List[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : List[Any] = [self.sep_token_id]
A_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file ,_a )
return (out_vocab_file,)
| 665 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gpt_bigcode"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,):
'''simple docstring'''
A_ : Optional[Any] = vocab_size
A_ : int = n_positions
A_ : Union[str, Any] = n_embd
A_ : int = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : List[Any] = activation_function
A_ : Dict = resid_pdrop
A_ : int = embd_pdrop
A_ : Optional[int] = attn_pdrop
A_ : Union[str, Any] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Union[str, Any] = scale_attn_weights
A_ : List[str] = use_cache
A_ : Tuple = attention_softmax_in_fpaa
A_ : List[str] = scale_attention_softmax_in_fpaa
A_ : Union[str, Any] = multi_query
A_ : Any = bos_token_id
A_ : Optional[int] = eos_token_id
super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
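# Note (sketch): via attribute_map above, the canonical config names alias the
# GPT-2 style fields, so for any instance `cfg`:
# cfg.hidden_size == cfg.n_embd and cfg.num_hidden_layers == cfg.n_layer.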
| 665 | 1 |
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def lowerCamelCase ( lowerCamelCase : Features):
A_ : Any = np.inf
def set_batch_size(lowerCamelCase : FeatureType) -> None:
nonlocal batch_size
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : int = min(lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
elif isinstance(lowerCamelCase , lowerCamelCase):
A_ : List[str] = min(lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
elif isinstance(lowerCamelCase , lowerCamelCase) and feature.dtype == "binary":
A_ : Tuple = min(lowerCamelCase , config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)
_visit(lowerCamelCase , lowerCamelCase)
return None if batch_size is np.inf else batch_size
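# Descriptive note (hedged rationale): features holding large binary payloads
# (images, audio, raw bytes) are capped to smaller Parquet row-group sizes so
# that reading back a single row group stays memory-friendly; scalar-only
# datasets fall through to the writer's default batch size.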
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Any ,_a : NestedDataStructureLike[PathLike] ,_a : Optional[NamedSplit] = None ,_a : Optional[Features] = None ,_a : str = None ,_a : bool = False ,_a : bool = False ,_a : Optional[int] = None ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
_a ,split=_a ,features=_a ,cache_dir=_a ,keep_in_memory=_a ,streaming=_a ,num_proc=_a ,**_a ,)
A_ : Optional[Any] = path_or_paths if isinstance(_a ,_a ) else {self.split: path_or_paths}
A_ : Optional[int] = _PACKAGED_DATASETS_MODULES["""parquet"""][1]
A_ : str = Parquet(
cache_dir=_a ,data_files=_a ,features=_a ,hash=_a ,**_a ,)
def _a ( self : List[Any] ):
'''simple docstring'''
if self.streaming:
A_ : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
A_ : Optional[int] = None
A_ : List[Any] = None
A_ : Union[str, Any] = None
A_ : List[Any] = None
self.builder.download_and_prepare(
download_config=_a ,download_mode=_a ,verification_mode=_a ,base_path=_a ,num_proc=self.num_proc ,)
A_ : Union[str, Any] = self.builder.as_dataset(
split=self.split ,verification_mode=_a ,in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ,_a : Dataset ,_a : Union[PathLike, BinaryIO] ,_a : Optional[int] = None ,**_a : List[str] ,):
'''simple docstring'''
A_ : Tuple = dataset
A_ : Tuple = path_or_buf
A_ : Optional[Any] = batch_size or get_writer_batch_size(dataset.features )
A_ : List[str] = parquet_writer_kwargs
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with open(self.path_or_buf ,"""wb+""" ) as buffer:
A_ : Dict = self._write(file_obj=_a ,batch_size=_a ,**self.parquet_writer_kwargs )
else:
A_ : List[Any] = self._write(file_obj=self.path_or_buf ,batch_size=_a ,**self.parquet_writer_kwargs )
return written
def _a ( self : Optional[Any] ,_a : BinaryIO ,_a : int ,**_a : Dict ):
'''simple docstring'''
A_ : List[Any] = 0
A_ : int = parquet_writer_kwargs.pop("""path_or_buf""" ,_a )
A_ : Union[str, Any] = self.dataset.features.arrow_schema
A_ : List[Any] = pq.ParquetWriter(_a ,schema=_a ,**_a )
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,_a ) ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating parquet from Arrow format""" ,):
A_ : Optional[Any] = query_table(
table=self.dataset._data ,key=slice(_a ,offset + batch_size ) ,indices=self.dataset._indices if self.dataset._indices is not None else None ,)
writer.write_table(_a )
written += batch.nbytes
writer.close()
return written
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def lowerCamelCase ( ):
A_ : Union[str, Any] = (
list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1))
)
A_ : Optional[Any] = bs[:]
A_ : List[str] = 0
for b in range(2**8):
if b not in bs:
bs.append(lowerCamelCase)
cs.append(2**8 + n)
n += 1
A_ : List[Any] = [chr(lowerCamelCase) for n in cs]
return dict(zip(lowerCamelCase , lowerCamelCase))
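# Illustrative check (sketch; this function is `bytes_to_unicode` in the
# original source, mangled to `lowerCamelCase` above): printable bytes map to
# themselves while non-printable bytes are shifted past codepoint 255, e.g.
# table = bytes_to_unicode()
# assert table[ord("A")] == "A"
# assert ord(table[0]) == 256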
def lowerCamelCase ( lowerCamelCase : int):
A_ : int = set()
A_ : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : List[str] = char
return pairs
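# Example (sketch; `get_pairs` in the original source):
# get_pairs(("h", "e", "l", "l", "o"))
# == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}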
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# The mask token behaves like a normal word, i.e. it includes the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
| 665 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : int ,_a : Optional[Any]=7 ,_a : Optional[int]=3 ,_a : int=18 ,_a : List[Any]=30 ,_a : Dict=400 ,_a : int=True ,_a : List[str]=None ,_a : List[str]=True ,):
'''simple docstring'''
A_ : Union[str, Any] = size if size is not None else {"""height""": 18, """width""": 18}
A_ : List[str] = parent
A_ : List[Any] = batch_size
A_ : int = num_channels
A_ : List[Any] = image_size
A_ : Tuple = min_resolution
A_ : List[str] = max_resolution
A_ : Dict = do_resize
A_ : str = size
A_ : Any = apply_ocr
def _a ( self : Tuple ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self : str ):
'''simple docstring'''
A_ : Any = LayoutLMvaImageProcessingTester(self )
@property
def _a ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self : int ):
'''simple docstring'''
A_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,"""do_resize""" ) )
self.assertTrue(hasattr(_a ,"""size""" ) )
self.assertTrue(hasattr(_a ,"""apply_ocr""" ) )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 18, """width""": 18} )
A_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{"""height""": 42, """width""": 42} )
def _a ( self : List[str] ):
'''simple docstring'''
pass
def _a ( self : int ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
A_ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
self.assertIsInstance(encoding.words ,_a )
self.assertIsInstance(encoding.boxes ,_a )
# Test batched
A_ : Union[str, Any] = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,np.ndarray )
# Test not batched input
A_ : Tuple = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
A_ : Optional[int] = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_a ,torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,torch.Tensor )
# Test not batched input
A_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
# Test batched
A_ : Any = image_processing(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = LayoutLMvaImageProcessor()
from datasets import load_dataset
A_ : int = load_dataset("""hf-internal-testing/fixtures_docvqa""" ,split="""test""" )
A_ : int = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
A_ : Tuple = image_processing(_a ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) ,len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A_ : Dict = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
A_ : Optional[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words ,_a )
self.assertListEqual(encoding.boxes ,_a )
# with apply_ocr = False
A_ : Any = LayoutLMvaImageProcessor(apply_ocr=_a )
A_ : Optional[Any] = image_processing(_a ,return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape ,(1, 3, 224, 224) )
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt'}
__magic_name__ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
__magic_name__ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
__magic_name__ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ConvBertTokenizer
def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : Any = strip_accents
A_ : int = tokenize_chinese_chars
A_ : Tuple = normalizer_class(**_a )
A_ : Any = do_lower_case
def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ):
'''simple docstring'''
A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_b:
output += token_ids_b + [self.sep_token_id]
return output
def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : int = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
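# A minimal sketch of the special-token layout the two methods above implement
# (BERT-style, reused by ConvBERT). The checkpoint name is taken from the map
# above and requires network access to download.
from transformers import ConvBertTokenizerFast

tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("hello world", "how are you")
# Pair input: [CLS] A [SEP] B [SEP]; token_type_ids are 0 over the first
# segment (including its special tokens) and 1 over the second.
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])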
| 665 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
__magic_name__ = logging.getLogger()
__magic_name__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def _a ( self : List[Any] ,_a : Any ):
'''simple docstring'''
os.makedirs(_a ,exist_ok=_a )
A_ : Union[str, Any] = {"""source""": """What is love ?""", """target""": """life"""}
A_ : Union[str, Any] = {"""train""": 12, """val""": 2, """test""": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
A_ : Union[str, Any] = """\n""".join([contents[field]] * n_lines[split] )
with open(os.path.join(_a ,f'{split}.{field}' ) ,"""w""" ) as f:
f.write(_a )
def _a ( self : Any ,_a : int ,_a : str = "pytorch" ):
'''simple docstring'''
A_ : List[Any] = self.get_auto_remove_tmp_dir()
A_ : Optional[int] = os.path.join(_a ,"""output""" )
A_ : str = os.path.join(_a ,"""data""" )
self._create_dummy_data(data_dir=_a )
A_ : int = f'\n --data_dir {data_dir} \\n --output_dir {output_dir} \\n --model_name_or_path facebook/rag-sequence-base \\n --model_type rag_sequence \\n --do_train \\n --do_predict \\n --n_val -1 \\n --val_check_interval 1.0 \\n --train_batch_size 2 \\n --eval_batch_size 1 \\n --max_source_length 25 \\n --max_target_length 25 \\n --val_max_target_length 25 \\n --test_max_target_length 25 \\n --label_smoothing 0.1 \\n --dropout 0.1 \\n --attention_dropout 0.1 \\n --weight_decay 0.001 \\n --adam_epsilon 1e-08 \\n --max_grad_norm 0.1 \\n --lr_scheduler polynomial \\n --learning_rate 3e-04 \\n --num_train_epochs 1 \\n --warmup_steps 4 \\n --gradient_accumulation_steps 1 \\n --distributed-port 8787 \\n --use_dummy_dataset 1 \\n --distributed_retriever {distributed_retriever} \\n '.split()
if gpus > 0:
testargs.append(f'--gpus={gpus}' )
if is_apex_available():
testargs.append("""--fp16""" )
else:
testargs.append("""--gpus=0""" )
testargs.append("""--distributed_backend=ddp_cpu""" )
testargs.append("""--num_processes=2""" )
A_ : Dict = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(_a ,env=self.get_env() )
A_ : Union[str, Any] = os.path.join(_a ,"""metrics.json""" )
with open(_a ) as f:
A_ : Optional[Any] = json.load(_a )
return result
@require_torch_gpu
def _a ( self : str ):
'''simple docstring'''
A_ : int = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] ,0.2 )
@require_torch_multi_gpu
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] ,0.2 )
@require_torch_gpu
@require_ray
def _a ( self : int ):
'''simple docstring'''
A_ : Tuple = self._run_finetune(gpus=1 ,distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] ,0.2 )
@require_torch_multi_gpu
@require_ray
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = self._run_finetune(gpus=1 ,distributed_retriever="""ray""" )
self.assertGreaterEqual(result["""test"""][0]["""test_avg_em"""] ,0.2 )
| 665 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_b is None:
return output
return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_b is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
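# A minimal sketch of BART's pair layout built above: <s> A </s></s> B </s>.
# create_token_type_ids_from_sequences returns all zeros because BART has no
# segment embeddings. The checkpoint name requires network access to download.
from transformers import BartTokenizerFast

tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
enc = tok("hello world", "how are you")
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # ['<s>', ..., '</s>', '</s>', ..., '</s>']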
| 665 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
__magic_name__ = [
'cross_validation.py',
'gradient_accumulation.py',
'local_sgd.py',
'multi_process_metrics.py',
'memory.py',
'automatic_gradient_accumulation.py',
'fsdp_with_peak_mem_tracking.py',
'deepspeed_with_config_support.py',
'megatron_lm_gpt_pretraining.py',
]
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : int ,_a : str ,_a : bool ,_a : str = None ,_a : list = None ):
'''simple docstring'''
A_ : str = None
A_ : str = os.path.abspath(os.path.join("""examples""" ,"""by_feature""" ) )
A_ : Tuple = os.path.abspath("""examples""" )
for item in os.listdir(_a ):
if item not in EXCLUDE_EXAMPLES:
A_ : Dict = os.path.join(_a ,_a )
if os.path.isfile(_a ) and ".py" in item_path:
with self.subTest(
tested_script=_a ,feature_script=_a ,tested_section="""main()""" if parser_only else """training_function()""" ,):
A_ : List[str] = compare_against_test(
os.path.join(_a ,_a ) ,_a ,_a ,_a )
A_ : List[str] = """\n""".join(_a )
if special_strings is not None:
for string in special_strings:
A_ : int = diff.replace(_a ,"""""" )
self.assertEqual(_a ,"""""" )
def _a ( self : Optional[Any] ):
'''simple docstring'''
self.one_complete_example("""complete_nlp_example.py""" ,_a )
self.one_complete_example("""complete_nlp_example.py""" ,_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : int = os.path.abspath(os.path.join("""examples""" ,"""cv_example.py""" ) )
A_ : Tuple = [
""" """ * 16 + """{\n\n""",
""" """ * 20 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 20 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 20 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 20 + """\"epoch\": epoch,\n\n""",
""" """ * 16 + """},\n\n""",
""" """ * 16 + """step=epoch,\n""",
""" """ * 12,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" ,_a ,_a ,_a )
self.one_complete_example("""complete_cv_example.py""" ,_a ,_a ,_a )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = False
@classmethod
def _a ( cls : Any ):
'''simple docstring'''
super().setUpClass()
A_ : List[str] = tempfile.mkdtemp()
A_ : Tuple = os.path.join(cls._tmpdir ,"""default_config.yml""" )
write_basic_config(save_location=cls.configPath )
A_ : Any = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def _a ( cls : List[str] ):
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"""epoch_0""" ) ) )
def _a ( self : Any ):
'''simple docstring'''
A_ : int = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
A_ : Tuple = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir ,"""step_2""" ) ) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Tuple = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir ,"epoch_0" )}\n '.split()
A_ : List[Any] = run_command(self._launch_args + testargs ,return_stdout=_a )
self.assertNotIn("""epoch 0:""" ,_a )
self.assertIn("""epoch 1:""" ,_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Tuple = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir ,"step_2" )}\n '.split()
A_ : Optional[int] = run_command(self._launch_args + testargs ,return_stdout=_a )
if torch.cuda.is_available():
A_ : Any = torch.cuda.device_count()
else:
A_ : List[Any] = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" ,_a )
self.assertIn("""epoch 1:""" ,_a )
else:
self.assertIn("""epoch 0:""" ,_a )
self.assertIn("""epoch 1:""" ,_a )
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ ,{"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
A_ : str = run_command(self._launch_args + testargs ,return_stdout=_a )
A_ : Optional[int] = re.findall("""({.+})""" ,_a )
A_ : Union[str, Any] = [r for r in results if """accuracy""" in r][-1]
A_ : int = ast.literal_eval(_a )
self.assertGreaterEqual(results["""accuracy"""] ,0.75 )
def _a ( self : Any ):
'''simple docstring'''
A_ : Optional[int] = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ ,{"""WANDB_MODE""": """offline"""} )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
A_ : Dict = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_a ,"""tracking""" ) ) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[Any] = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def _a ( self : Any ):
'''simple docstring'''
A_ : Any = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
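# A minimal sketch of the launch pattern the tests above exercise: write a
# default single-process accelerate config once, then prepend the launcher
# arguments to each example script's argv. The example path is illustrative.
import os
import tempfile

from accelerate.utils import write_basic_config

tmpdir = tempfile.mkdtemp()
config_path = os.path.join(tmpdir, "default_config.yml")
write_basic_config(save_location=config_path)
launch_args = ["accelerate", "launch", "--config_file", config_path]
print(launch_args + ["examples/by_feature/checkpointing.py", "--checkpointing_steps", "epoch"])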
| 665 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
A_ : int = TapasConfig.from_json_file(lowerCamelCase)
# set absolute/relative position embeddings parameter
A_ : List[Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WTQ":
# run_task_main.py hparams
A_ : Tuple = 4
A_ : Optional[Any] = True
# hparam_utils.py hparams
A_ : Any = 0.66_4694
A_ : str = 0.20_7951
A_ : Any = 0.12_1194
A_ : str = True
A_ : Dict = True
A_ : int = False
A_ : int = 0.035_2513
A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
A_ : int = 4
A_ : Union[str, Any] = False
# hparam_utils.py hparams
A_ : Dict = 36.4519
A_ : List[Any] = 0.90_3421
A_ : Any = 222.088
A_ : Optional[Any] = True
A_ : Optional[int] = True
A_ : Optional[Any] = True
A_ : Optional[int] = 0.76_3141
A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase)
elif task == "TABFACT":
A_ : Any = TapasForSequenceClassification(config=lowerCamelCase)
elif task == "MLM":
A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase)
elif task == "INTERMEDIATE_PRETRAINING":
A_ : Union[str, Any] = TapasModel(config=lowerCamelCase)
else:
raise ValueError(F'Task {task} not supported.')
print(F'Building PyTorch model from configuration: {config}')
# Load weights from tf checkpoint
load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Save pytorch-model (weights and configuration)
print(F'Save PyTorch model to {pytorch_dump_path}')
model.save_pretrained(lowerCamelCase)
# Save tokenizer files
print(F'Save tokenizer files to {pytorch_dump_path}')
A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512)
tokenizer.save_pretrained(lowerCamelCase)
print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
    help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
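# Example invocation sketch (the script filename and all paths are placeholders;
# --reset_position_index_per_cell is a store_true flag, so passing it enables
# relative position embeddings):
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output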
| 665 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCamelCase ( lowerCamelCase : List[Any]):
if isinstance(lowerCamelCase , collections.abc.Iterable):
return x
return (x, x)
@require_tf
class __lowerCAmelCase :
'''simple docstring'''
def _a ( self : Union[str, Any] ,_a : Tuple ,_a : Dict ):
'''simple docstring'''
pass
def _a ( self : List[str] ):
'''simple docstring'''
pass
def _a ( self : List[str] ):
'''simple docstring'''
pass
def _a ( self : Any ,_a : List[Any] ,_a : Optional[Any] ,_a : int ,_a : str ,_a : Optional[int]=None ,**_a : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_a ,_a )
A_ : Dict = TFVisionTextDualEncoderModel(_a )
A_ : Optional[Any] = model(input_ids=_a ,pixel_values=_a ,attention_mask=_a )
self.assertEqual(output["""text_embeds"""].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape ,(pixel_values.shape[0], config.projection_dim) )
def _a ( self : Dict ,_a : List[str] ,_a : Union[str, Any] ,_a : int ,_a : int ,_a : List[str]=None ,**_a : List[Any] ):
'''simple docstring'''
A_ , A_ : Optional[int] = self.get_vision_text_model(_a ,_a )
A_ : str = TFVisionTextDualEncoderModel(vision_model=_a ,text_model=_a )
A_ : Optional[Any] = model(input_ids=_a ,pixel_values=_a ,attention_mask=_a )
self.assertEqual(output["""text_embeds"""].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : Optional[int] ,_a : Optional[Any] ,_a : Dict ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any]=None ,**_a : Dict ):
'''simple docstring'''
A_ , A_ : str = self.get_vision_text_model(_a ,_a )
A_ : Dict = {"""vision_model""": vision_model, """text_model""": text_model}
A_ : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_a )
A_ : Optional[Any] = model(input_ids=_a ,pixel_values=_a ,attention_mask=_a )
self.assertEqual(output["""text_embeds"""].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def _a ( self : str ,_a : Dict ,_a : Union[str, Any] ,_a : Any ,_a : Optional[int] ,_a : Union[str, Any]=None ,**_a : Optional[int] ):
'''simple docstring'''
A_ , A_ : Optional[Any] = self.get_vision_text_model(_a ,_a )
A_ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_a ,text_model=_a )
A_ : Optional[int] = model(input_ids=_a ,pixel_values=_a ,attention_mask=_a )
A_ : Tuple = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
A_ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_a )
A_ : str = model(input_ids=_a ,pixel_values=_a ,attention_mask=_a )
A_ : List[Any] = after_output[0].numpy()
A_ : List[Any] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(_a ,1e-5 )
def _a ( self : List[str] ,_a : Optional[Any] ,_a : List[str] ,_a : List[Any] ,_a : Dict ,_a : int=None ,**_a : str ):
'''simple docstring'''
A_ , A_ : List[Any] = self.get_vision_text_model(_a ,_a )
A_ : Dict = TFVisionTextDualEncoderModel(vision_model=_a ,text_model=_a )
A_ : int = model(
input_ids=_a ,pixel_values=_a ,attention_mask=_a ,output_attentions=_a )
A_ : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_a ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Dict = to_atuple(vision_model.config.image_size )
A_ : Optional[int] = to_atuple(vision_model.config.patch_size )
A_ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A_ : Dict = num_patches + 1
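# Worked example: a 224x224 image with 16x16 patches gives (224 // 16) ** 2 = 196
# patches, so the attention sequence length is 196 + 1 = 197 with the [CLS] token.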
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
A_ : Dict = output.text_model_output.attentions
self.assertEqual(len(_a ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def _a ( self : int ,_a : np.ndarray ,_a : np.ndarray ,_a : float ):
'''simple docstring'''
A_ : Optional[Any] = np.abs((a - b) ).max()
self.assertLessEqual(_a ,_a ,f'Difference between torch and flax is {diff} (>= {tol}).' )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : int = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_save_load(**_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_a )
@slow
def _a ( self : str ):
'''simple docstring'''
A_ , A_ : List[Any] = self.get_pretrained_model_and_inputs()
A_ : Optional[Any] = model_a(**_a )
A_ : int = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_a )
A_ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(_a )
A_ : int = model_b(**_a )
A_ : Any = after_outputs[0].numpy()
A_ : Tuple = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(_a ,1e-5 )
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _a ( self : str ):
'''simple docstring'''
A_ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" ,"""hf-internal-testing/tiny-random-bert""" )
A_ : Tuple = 13
A_ : Optional[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A_ : Union[str, Any] = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
A_ : Optional[Any] = random_attention_mask([batch_size, 4] )
A_ : Any = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : List[str] ,_a : int ,_a : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = TFViTModel(_a ,name="""vision_model""" )
A_ : Optional[Any] = TFBertModel(_a ,name="""text_model""" )
return vision_model, text_model
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Any = TFViTModelTester(self )
A_ : Dict = TFBertModelTester(self )
A_ : Tuple = vit_model_tester.prepare_config_and_inputs()
A_ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
A_ , A_ , A_ : Dict = vision_config_and_inputs
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Tuple = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _a ( self : Dict ):
'''simple docstring'''
A_ : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-deit-tf""" ,"""hf-internal-testing/tiny-random-roberta""" )
A_ : int = 13
A_ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A_ : Any = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
A_ : Any = random_attention_mask([batch_size, 4] )
A_ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Dict ,_a : str ,_a : List[str] ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[Any]=None ,**_a : Tuple ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.get_vision_text_model(_a ,_a )
A_ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_a ,text_model=_a )
A_ : List[str] = model(
input_ids=_a ,pixel_values=_a ,attention_mask=_a ,output_attentions=_a )
A_ : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(_a ) ,vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ : Dict = to_atuple(vision_model.config.image_size )
A_ : int = to_atuple(vision_model.config.patch_size )
A_ : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
A_ : Tuple = num_patches + 2
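# Worked example: with 196 patches, DeiT's sequence length is 196 + 2 = 198,
# accounting for both the [CLS] token and the distillation token.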
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
A_ : int = output.text_model_output.attentions
self.assertEqual(len(_a ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def _a ( self : Any ,_a : str ,_a : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = TFDeiTModel(_a ,name="""vision_model""" )
A_ : int = TFRobertaModel(_a ,name="""text_model""" )
return vision_model, text_model
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = TFDeiTModelTester(self )
A_ : Optional[int] = TFRobertaModelTester(self )
A_ : Dict = vit_model_tester.prepare_config_and_inputs()
A_ : int = bert_model_tester.prepare_config_and_inputs()
A_ , A_ , A_ : List[str] = vision_config_and_inputs
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Dict = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
"""Rocketknight1/tiny-random-clip-tf""" ,"""hf-internal-testing/tiny-random-bert""" )
A_ : Any = 13
A_ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
A_ : List[Any] = ids_tensor([batch_size, 4] ,model.text_model.config.vocab_size )
A_ : Any = random_attention_mask([batch_size, 4] )
A_ : Tuple = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def _a ( self : Optional[Any] ,_a : int ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = TFCLIPVisionModel(_a ,name="""vision_model""" )
A_ : List[Any] = TFBertModel(_a ,name="""text_model""" )
return vision_model, text_model
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = TFCLIPVisionModelTester(self )
A_ : List[Any] = TFBertModelTester(self )
A_ : List[Any] = clip_model_tester.prepare_config_and_inputs()
A_ : int = bert_model_tester.prepare_config_and_inputs()
A_ , A_ : Optional[int] = vision_config_and_inputs
A_ , A_ , A_ , A_ , A_ , A_ , A_ : Optional[Any] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : Dict ):
'''simple docstring'''
A_ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(
"""clip-italian/clip-italian""" ,logit_scale_init_value=1.0 ,from_pt=_a )
A_ : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
A_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
A_ : Any = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] ,images=_a ,padding=_a ,return_tensors="""np""" )
A_ : str = model(**_a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
A_ : Dict = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() ,_a ,atol=1e-3 ) )
| 665 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""vqvae"""]
def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a )
def _a ( self : str ):
'''simple docstring'''
return 50 if isinstance(self.scheduler ,_a ) else 1000
@torch.no_grad()
def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,):
'''simple docstring'''
A_ : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_a )
A_ : Union[str, Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
A_ : int = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) ,generator=_a ,device=self.device ,)
A_ : List[Any] = noise
A_ : str = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_a ,_a )
A_ : Any = self.mel.audio_slice_to_image(_a )
A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
A_ : Optional[Any] = (input_image / 255) * 2 - 1
A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device )
if self.vqvae is not None:
A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample(
generator=_a )[0]
A_ : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] )
A_ : Tuple = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
A_ : Tuple = int(mask_start_secs * pixels_per_second )
A_ : str = int(mask_end_secs * pixels_per_second )
A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet ,_a ):
A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""]
else:
A_ : List[Any] = self.unet(_a ,_a )["""sample"""]
if isinstance(self.scheduler ,_a ):
A_ : Dict = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""]
else:
A_ : Any = self.scheduler.step(
model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""]
if mask is not None:
if mask_start > 0:
A_ : Tuple = mask[:, step, :, :mask_start]
if mask_end > 0:
A_ : List[str] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
A_ : str = 1 / self.vqvae.config.scaling_factor * images
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""]
A_ : int = (images / 2 + 0.5).clamp(0 ,1 )
A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy()
A_ : Optional[int] = (images * 255).round().astype("""uint8""" )
A_ : List[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) )
A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) )
@torch.no_grad()
def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler ,_a )
self.scheduler.set_timesteps(_a )
A_ : Optional[Any] = np.array(
[np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
A_ : List[str] = (sample / 255) * 2 - 1
A_ : Optional[int] = torch.Tensor(_a ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ):
A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
A_ : Any = self.scheduler.alphas_cumprod[t]
A_ : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
A_ : str = 1 - alpha_prod_t
A_ : List[str] = self.unet(_a ,_a )["""sample"""]
A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ):
'''simple docstring'''
A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) )
return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
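# A standalone torch sketch of the spherical linear interpolation implemented
# by the static helper above: interpolate between two latents along the great
# circle connecting them. Names here are illustrative, not the pipeline's own.
import torch
from math import acos, sin

def slerp_sketch(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two flattened latents.
    theta = acos(
        float(torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1)))
    )
    return (sin((1 - alpha) * theta) * x0 + sin(alpha * theta) * x1) / sin(theta)

# alpha=0 returns x0 and alpha=1 returns x1; intermediate values follow the
# great circle, which keeps Gaussian latents approximately on-distribution.
print(slerp_sketch(torch.randn(4), torch.randn(4), 0.5))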
| 665 | 1 |
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
__magic_name__ = True
except ImportError:
__magic_name__ = False
try:
from torch.hub import _get_torch_home
__magic_name__ = _get_torch_home()
except ImportError:
__magic_name__ = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch'))
)
__magic_name__ = os.path.join(torch_cache_home, 'transformers')
__magic_name__ = 'https://cdn.huggingface.co'
__magic_name__ = 'https://s3.amazonaws.com/models.huggingface.co/bert'
__magic_name__ = '/'.join(str(Path(__file__).resolve()).split('/')[:-1])
__magic_name__ = os.path.join(PATH, 'config.yaml')
__magic_name__ = os.path.join(PATH, 'attributes.txt')
__magic_name__ = os.path.join(PATH, 'objects.txt')
__magic_name__ = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', default_cache_path)
__magic_name__ = os.getenv('PYTORCH_TRANSFORMERS_CACHE', PYTORCH_PRETRAINED_BERT_CACHE)
__magic_name__ = os.getenv('TRANSFORMERS_CACHE', PYTORCH_TRANSFORMERS_CACHE)
__magic_name__ = 'pytorch_model.bin'
__magic_name__ = 'config.yaml'
def lowerCamelCase ( lowerCamelCase : Union[str, Any]=OBJECTS , lowerCamelCase : str=ATTRIBUTES):
A_ : Optional[int] = []
with open(lowerCamelCase) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""")[0].lower().strip())
A_ : Dict = []
with open(lowerCamelCase) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""")[0].lower().strip())
return vg_classes, vg_attrs
def lowerCamelCase ( lowerCamelCase : int):
A_ : Union[str, Any] = OrderedDict()
with open(lowerCamelCase , """rb""") as f:
A_ : List[Any] = pkl.load(lowerCamelCase)["""model"""]
for k in copy.deepcopy(list(ckp.keys())):
A_ : str = ckp.pop(lowerCamelCase)
if isinstance(lowerCamelCase , np.ndarray):
A_ : Union[str, Any] = torch.tensor(lowerCamelCase)
else:
assert isinstance(lowerCamelCase , torch.Tensor), type(lowerCamelCase)
A_ : str = v
return r
class __lowerCAmelCase :
'''simple docstring'''
a_ = {}
def __init__( self : Tuple ,_a : dict ,_a : str = "root" ,_a : Tuple=0 ):
'''simple docstring'''
A_ : Any = name
A_ : str = level
A_ : Tuple = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
A_ : List[str] = copy.deepcopy(_a )
A_ : Optional[Any] = copy.deepcopy(_a )
if isinstance(_a ,_a ):
A_ : Dict = Config(_a ,name=_a ,level=level + 1 )
A_ : Any = v
setattr(self ,_a ,_a )
A_ : Tuple = d
def __repr__( self : Tuple ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Dict ,_a : List[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = val
A_ : int = val
A_ : str = key.split(""".""" )
A_ : int = len(_a ) - 1
A_ : List[str] = self._pointer
if len(_a ) > 1:
for i, l in enumerate(_a ):
if hasattr(self ,_a ) and isinstance(getattr(self ,_a ) ,_a ):
setattr(getattr(self ,_a ) ,""".""".join(levels[i:] ) ,_a )
if l == last_level:
A_ : Any = val
else:
A_ : Optional[int] = pointer[l]
def _a ( self : int ):
'''simple docstring'''
return self._pointer
def _a ( self : Union[str, Any] ,_a : Any ,_a : Optional[int] ):
'''simple docstring'''
with open(f'{file_name}' ,"""w""" ) as stream:
dump(_a ,_a )
def _a ( self : Optional[int] ,_a : Any ,_a : Optional[int] ):
'''simple docstring'''
with open(f'{file_name}' ,"""w""" ) as stream:
json.dump(_a ,_a )
@staticmethod
def _a ( _a : List[Any] ):
'''simple docstring'''
with open(_a ) as stream:
A_ : Tuple = load(_a ,Loader=_a )
return data
def __str__( self : Any ):
'''simple docstring'''
A_ : List[Any] = """ """
if self._name != "root":
A_ : List[Any] = f'{t * (self._level-1)}{self._name}:\n'
else:
A_ : Optional[Any] = """"""
A_ : Any = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(_a ,_a ):
r += f'{t * (self._level)}{v}\n'
self._level += 1
else:
r += f'{t * (self._level)}{k}: {v} ({type(_a ).__name__})\n'
A_ : Any = level
return r[:-1]
@classmethod
def _a ( cls : Tuple ,_a : str ,**_a : List[Any] ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = cls.get_config_dict(_a ,**_a )
return cls(_a )
@classmethod
def _a ( cls : int ,_a : str ,**_a : str ):
'''simple docstring'''
A_ : Tuple = kwargs.pop("""cache_dir""" ,_a )
A_ : Optional[int] = kwargs.pop("""force_download""" ,_a )
A_ : Any = kwargs.pop("""resume_download""" ,_a )
A_ : Tuple = kwargs.pop("""proxies""" ,_a )
A_ : Union[str, Any] = kwargs.pop("""local_files_only""" ,_a )
if os.path.isdir(_a ):
A_ : Any = os.path.join(_a ,_a )
elif os.path.isfile(_a ) or is_remote_url(_a ):
A_ : Dict = pretrained_model_name_or_path
else:
A_ : Union[str, Any] = hf_bucket_url(_a ,filename=_a ,use_cdn=_a )
try:
# Load from URL or cache if already cached
A_ : Any = cached_path(
_a ,cache_dir=_a ,force_download=_a ,proxies=_a ,resume_download=_a ,local_files_only=_a ,)
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
A_ : List[str] = Config.load_yaml(_a )
except EnvironmentError:
A_ : Optional[int] = """Can't load config for"""
raise EnvironmentError(_a )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(_a ), kwargs
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
A_ : Tuple = torch.load("""dump.pt""" , map_location=in_tensor.device)
A_ : Any = in_tensor.numpy()
A_ : Union[str, Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5])
print(nb.shape , nb[0, 0, :5])
assert np.allclose(lowerCamelCase , lowerCamelCase , rtol=0.01 , atol=0.1), (
F'{sum([1 for x in np.isclose(lowerCamelCase , lowerCamelCase , rtol=0.01 , atol=0.1).flatten() if x is False])/len(na.flatten())*100:.4f} %'
" element-wise mismatch"
)
raise Exception("""tensors are all good""")
# Hugging face functions below
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = urlparse(lowerCamelCase)
return parsed.scheme in ("http", "https")
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Tuple=True):
A_ : str = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
A_ : Optional[int] = """/""" not in model_id
if legacy_format:
return F'{endpoint}/{model_id}-{filename}'
else:
return F'{endpoint}/{model_id}/{filename}'
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=0 , lowerCamelCase : Any=None , ):
A_ : Union[str, Any] = """python/{}""".format(sys.version.split()[0])
if _torch_available:
ua += "; torch/{}".format(torch.__version__)
if isinstance(lowerCamelCase , lowerCamelCase):
ua += "; " + "; ".join("""{}/{}""".format(lowerCamelCase , lowerCamelCase) for k, v in user_agent.items())
elif isinstance(lowerCamelCase , lowerCamelCase):
ua += "; " + user_agent
A_ : Any = {"""user-agent""": ua}
if resume_size > 0:
A_ : str = """bytes=%d-""" % (resume_size,)
A_ : List[str] = requests.get(lowerCamelCase , stream=lowerCamelCase , proxies=lowerCamelCase , headers=lowerCamelCase)
if response.status_code == 416: # Range not satisfiable
return
A_ : Union[str, Any] = response.headers.get("""Content-Length""")
A_ : List[str] = resume_size + int(lowerCamelCase) if content_length is not None else None
A_ : str = tqdm(
unit="""B""" , unit_scale=lowerCamelCase , total=lowerCamelCase , initial=lowerCamelCase , desc="""Downloading""" , )
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCamelCase))
temp_file.write(lowerCamelCase)
progress.close()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any]=None , lowerCamelCase : List[str]=False , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[Any]=10 , lowerCamelCase : Optional[Any]=False , lowerCamelCase : List[Any]=None , lowerCamelCase : str=False , ):
if cache_dir is None:
A_ : Optional[int] = TRANSFORMERS_CACHE
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : Any = str(lowerCamelCase)
os.makedirs(lowerCamelCase , exist_ok=lowerCamelCase)
A_ : Dict = None
if not local_files_only:
try:
A_ : int = requests.head(lowerCamelCase , allow_redirects=lowerCamelCase , proxies=lowerCamelCase , timeout=lowerCamelCase)
if response.status_code == 200:
A_ : List[str] = response.headers.get("""ETag""")
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
A_ : str = url_to_filename(lowerCamelCase , lowerCamelCase)
# get cache path to put the file
A_ : List[Any] = os.path.join(lowerCamelCase , lowerCamelCase)
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCamelCase):
return cache_path
else:
A_ : Any = [
file
for file in fnmatch.filter(os.listdir(lowerCamelCase) , filename + """.*""")
if not file.endswith(""".json""") and not file.endswith(""".lock""")
]
if len(lowerCamelCase) > 0:
return os.path.join(lowerCamelCase , matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"""Cannot find the requested files in the cached path and outgoing traffic has been"""
""" disabled. To enable model look-ups and downloads online, set 'local_files_only'"""
""" to False.""")
return None
# From now on, etag is not None.
if os.path.exists(lowerCamelCase) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
A_ : Any = cache_path + """.lock"""
with FileLock(lowerCamelCase):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCamelCase) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
A_ : str = cache_path + """.incomplete"""
@contextmanager
def _resumable_file_manager():
with open(lowerCamelCase , """a+b""") as f:
yield f
A_ : str = _resumable_file_manager
if os.path.exists(lowerCamelCase):
A_ : List[str] = os.stat(lowerCamelCase).st_size
else:
A_ : Dict = 0
else:
A_ : int = partial(tempfile.NamedTemporaryFile , dir=lowerCamelCase , delete=lowerCamelCase)
A_ : Union[str, Any] = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
"""%s not found in cache or force_download set to True, downloading to %s""" , lowerCamelCase , temp_file.name , )
http_get(
lowerCamelCase , lowerCamelCase , proxies=lowerCamelCase , resume_size=lowerCamelCase , user_agent=lowerCamelCase , )
os.replace(temp_file.name , lowerCamelCase)
A_ : Tuple = {"""url""": url, """etag""": etag}
A_ : int = cache_path + """.json"""
with open(lowerCamelCase , """w""") as meta_file:
json.dump(lowerCamelCase , lowerCamelCase)
return cache_path
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Tuple=None):
A_ : Optional[int] = url.encode("""utf-8""")
A_ : Any = shaaaa(lowerCamelCase)
A_ : Optional[Any] = url_hash.hexdigest()
if etag:
A_ : Any = etag.encode("""utf-8""")
A_ : List[Any] = shaaaa(lowerCamelCase)
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5"""):
filename += ".h5"
return filename
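# Usage sketch of the cache-naming scheme above, with illustrative values: the
# filename is the hex sha256 of the URL, suffixed with "." + sha256(etag) when
# an ETag is known, and ".h5" is preserved for TF weight files.
from hashlib import sha256

url, etag = "https://cdn.huggingface.co/model.bin", '"abc123"'
filename = sha256(url.encode("utf-8")).hexdigest()
filename += "." + sha256(etag.encode("utf-8")).hexdigest()
print(filename)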
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : str=None , lowerCamelCase : str=False , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : List[Any]=False , lowerCamelCase : Any=None , lowerCamelCase : Any=False , lowerCamelCase : Any=False , lowerCamelCase : List[Any]=False , ):
if cache_dir is None:
A_ : Union[str, Any] = TRANSFORMERS_CACHE
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : Optional[Any] = str(lowerCamelCase)
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : Optional[Any] = str(lowerCamelCase)
if is_remote_url(lowerCamelCase):
# URL, so get it from the cache (downloading if necessary)
A_ : int = get_from_cache(
lowerCamelCase , cache_dir=lowerCamelCase , force_download=lowerCamelCase , proxies=lowerCamelCase , resume_download=lowerCamelCase , user_agent=lowerCamelCase , local_files_only=lowerCamelCase , )
elif os.path.exists(lowerCamelCase):
# File, and it exists.
A_ : Dict = url_or_filename
elif urlparse(lowerCamelCase).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("""file {} not found""".format(lowerCamelCase))
else:
# Something unknown
raise ValueError("""unable to parse {} as a URL or as a local path""".format(lowerCamelCase))
if extract_compressed_file:
if not is_zipfile(lowerCamelCase) and not tarfile.is_tarfile(lowerCamelCase):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
A_ , A_ : Optional[int] = os.path.split(lowerCamelCase)
A_ : int = output_file.replace(""".""" , """-""") + """-extracted"""
A_ : Dict = os.path.join(lowerCamelCase , lowerCamelCase)
if os.path.isdir(lowerCamelCase) and os.listdir(lowerCamelCase) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
A_ : Optional[Any] = output_path + """.lock"""
with FileLock(lowerCamelCase):
shutil.rmtree(lowerCamelCase , ignore_errors=lowerCamelCase)
os.makedirs(lowerCamelCase)
if is_zipfile(lowerCamelCase):
with ZipFile(lowerCamelCase , """r""") as zip_file:
zip_file.extractall(lowerCamelCase)
zip_file.close()
elif tarfile.is_tarfile(lowerCamelCase):
A_ : Any = tarfile.open(lowerCamelCase)
tar_file.extractall(lowerCamelCase)
tar_file.close()
else:
raise EnvironmentError("""Archive format of {} could not be identified""".format(lowerCamelCase))
return output_path_extracted
return output_path
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : int=","):
assert isinstance(lowerCamelCase , lowerCamelCase)
if os.path.isfile(lowerCamelCase):
with open(lowerCamelCase) as f:
A_ : Optional[Any] = eval(f.read())
else:
A_ : Any = requests.get(lowerCamelCase)
try:
A_ : List[str] = req.json()
except Exception:
A_ : Optional[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
A_ : int = eval(lowerCamelCase)
except Exception:
A_ : List[str] = data.split("""\n""")
req.close()
return data
def lowerCamelCase ( lowerCamelCase : List[Any]):
A_ : Optional[Any] = requests.get(lowerCamelCase)
A_ : Any = np.array(Image.open(BytesIO(response.content)))
return img
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = url.split("""/""")[-1]
if fn not in os.listdir(os.getcwd()):
wget.download(lowerCamelCase)
with open(lowerCamelCase , """rb""") as stream:
A_ : Optional[int] = pkl.load(lowerCamelCase)
A_ : Optional[Any] = weights.pop("""model""")
A_ : Dict = {}
for k, v in model.items():
A_ : List[Any] = torch.from_numpy(lowerCamelCase)
if "running_var" in k:
A_ : Union[str, Any] = torch.tensor([0])
A_ : Dict = k.replace("""running_var""" , """num_batches_tracked""")
A_ : int = zero
return new
def lowerCamelCase ( ):
print(F'{os.path.abspath(os.path.join(PATH , os.pardir))}/demo.ipynb')
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]="RGB"):
assert isinstance(lowerCamelCase , lowerCamelCase)
if os.path.isfile(lowerCamelCase):
A_ : List[str] = cva.imread(lowerCamelCase)
else:
A_ : int = get_image_from_url(lowerCamelCase)
assert img is not None, F'could not connect to: {im}'
A_ : Optional[Any] = cva.cvtColor(lowerCamelCase , cva.COLOR_BGR2RGB)
if input_format == "RGB":
A_ : Dict = img[:, :, ::-1]
return img
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[Any]=1):
return (images[i : i + batch] for i in range(0 , len(lowerCamelCase) , lowerCamelCase))
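# Usage sketch of the batching generator above: it yields successive slices of
# at most `batch` items, so a list of 7 items with batch=3 gives 3 + 3 + 1.
images = list(range(7))
for chunk in (images[i : i + 3] for i in range(0, len(images), 3)):
    print(chunk)  # [0, 1, 2], then [3, 4, 5], then [6]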
| 665 |
'''simple docstring'''
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
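# A hedged quick-reference (the file name nlp_example.py is illustrative, not
# taken from this dump):
#
#     python nlp_example.py --cpu
#     accelerate launch nlp_example.py
#     accelerate launch nlp_example.py --mixed_precision fp16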
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : str = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Dict):
# max_length=None => use the model max length (it's actually the default)
A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Tuple = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Tuple):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we pad to a round multiple of 16 (fp8) or 8 (fp16/bf16)
if accelerator.mixed_precision == "fp8":
A_ : List[Any] = 16
elif accelerator.mixed_precision != "no":
A_ : Any = 8
else:
A_ : Tuple = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase)
A_ : str = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict):
# Initialize accelerator
A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : List[Any] = config["""lr"""]
A_ : List[Any] = int(config["""num_epochs"""])
A_ : int = int(config["""seed"""])
A_ : Dict = int(config["""batch_size"""])
A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
A_ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
A_ : Any = batch_size // MAX_GPU_BATCH_SIZE
A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
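    # Worked example: with config batch_size 64 and MAX_GPU_BATCH_SIZE 16 this
    # yields gradient_accumulation_steps = 4 and per-forward batches of 16, so
    # each optimizer step still sees 64 samples in total.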
set_seed(lowerCamelCase)
A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : str = model.to(accelerator.device)
# Instantiate optimizer
A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : Tuple = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
A_ : Optional[int] = model(**lowerCamelCase)
A_ : List[Any] = outputs.loss
A_ : Tuple = loss / gradient_accumulation_steps
accelerator.backward(lowerCamelCase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : Union[str, Any] = model(**lowerCamelCase)
A_ : Any = outputs.logits.argmax(dim=-1)
A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : int = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Dict = parser.parse_args()
A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """mobilenet_v2"""
def __init__( self : Optional[Any] ,_a : Tuple=3 ,_a : Optional[int]=224 ,_a : Optional[int]=1.0 ,_a : List[str]=8 ,_a : Any=8 ,_a : Optional[int]=6 ,_a : Dict=32 ,_a : Union[str, Any]=True ,_a : str=True ,_a : Optional[int]="relu6" ,_a : Dict=True ,_a : List[Any]=0.8 ,_a : Optional[Any]=0.02 ,_a : Tuple=0.001 ,_a : Optional[Any]=255 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
if depth_multiplier <= 0:
raise ValueError("""depth_multiplier must be greater than zero.""" )
A_ : Any = num_channels
A_ : Any = image_size
A_ : List[str] = depth_multiplier
A_ : str = depth_divisible_by
A_ : Any = min_depth
A_ : int = expand_ratio
A_ : Any = output_stride
A_ : Tuple = first_layer_is_expansion
A_ : str = finegrained_output
A_ : Any = hidden_act
A_ : Tuple = tf_padding
A_ : Optional[Any] = classifier_dropout_prob
A_ : Dict = initializer_range
A_ : int = layer_norm_eps
A_ : List[Any] = semantic_loss_ignore_index
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = version.parse("""1.11""" )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return OrderedDict([("""pixel_values""", {0: """batch"""})] )
@property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "image-classification":
return OrderedDict([("""logits""", {0: """batch"""})] )
else:
return OrderedDict([("""last_hidden_state""", {0: """batch"""}), ("""pooler_output""", {0: """batch"""})] )
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 1e-4
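# A hedged usage sketch (assuming the two classes above map to transformers'
# MobileNetV2Config / MobileNetV2OnnxConfig behind the anonymised names):
#
#     config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
#     onnx_config = MobileNetV2OnnxConfig(config, task="image-classification")
#     onnx_config.inputs    # OrderedDict([("pixel_values", {0: "batch"})])
#     onnx_config.outputs   # OrderedDict([("logits", {0: "batch"})])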
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]):
# Validation
if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days):
raise ValueError("""The parameter days should be a list of integers""")
if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs):
raise ValueError("""The parameter costs should be a list of three integers""")
if len(lowerCamelCase) == 0:
return 0
if min(lowerCamelCase) <= 0:
raise ValueError("""All days elements should be greater than 0""")
if max(lowerCamelCase) >= 366:
raise ValueError("""All days elements should be less than 366""")
A_ : Tuple = set(lowerCamelCase)
@functools.cache
def dynamic_programming(lowerCamelCase : int) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1)
return min(
costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , )
return dynamic_programming(1)
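# Worked example: days = [1, 4, 6, 7, 8, 20] with costs = [2, 7, 15]
# (1-day, 7-day and 30-day passes) returns 11: a 7-day pass covers days 1-7,
# then single-day passes cover days 8 and 20.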
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__magic_name__ = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
__magic_name__ = {'facebook/blenderbot_small-90M': 512}
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : Dict = set()
A_ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
A_ : Tuple = char
A_ : int = set(lowerCamelCase)
return pairs
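# Worked example: for the word "hello" the helper above returns the set of
# adjacent symbol pairs {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.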
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[int] ,_a : Optional[Any] ,_a : int ,_a : Any="__start__" ,_a : int="__end__" ,_a : Union[str, Any]="__unk__" ,_a : Any="__null__" ,**_a : Union[str, Any] ,):
'''simple docstring'''
super().__init__(unk_token=_a ,bos_token=_a ,eos_token=_a ,pad_token=_a ,**_a )
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : int = json.load(_a )
A_ : int = {v: k for k, v in self.encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : str = merges_handle.read().split("""\n""" )[1:-1]
A_ : Optional[int] = [tuple(merge.split() ) for merge in merges]
A_ : str = dict(zip(_a ,range(len(_a ) ) ) )
A_ : str = {}
@property
def _a ( self : List[Any] ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : List[Any] ,_a : str ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Union[str, Any] = re.sub("""([.,!?()])""" ,r""" \1""" ,_a )
A_ : Optional[int] = re.sub("""(')""" ,r""" \1 """ ,_a )
A_ : Tuple = re.sub(r"""\s{2,}""" ,""" """ ,_a )
if "\n" in token:
A_ : Any = token.replace("""\n""" ,""" __newln__""" )
A_ : Union[str, Any] = token.split(""" """ )
A_ : Optional[Any] = []
for token in tokens:
if not len(_a ):
continue
A_ : Tuple = token.lower()
A_ : Tuple = tuple(_a )
A_ : List[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
A_ : Optional[int] = get_pairs(_a )
if not pairs:
words.append(_a )
continue
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : Optional[int] = []
A_ : Any = 0
while i < len(_a ):
try:
A_ : Any = word.index(_a ,_a )
new_word.extend(word[i:j] )
A_ : Tuple = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : Dict = tuple(_a )
A_ : List[str] = new_word
if len(_a ) == 1:
break
else:
A_ : Union[str, Any] = get_pairs(_a )
A_ : Union[str, Any] = """@@ """.join(_a )
A_ : List[str] = word[:-4]
A_ : Dict = word
words.append(_a )
return " ".join(_a )
def _a ( self : List[Any] ,_a : str ):
'''simple docstring'''
A_ : Optional[Any] = []
A_ : Optional[int] = re.findall(r"""\S+\n?""" ,_a )
for token in words:
split_tokens.extend(list(self.bpe(_a ).split(""" """ ) ) )
return split_tokens
def _a ( self : Optional[Any] ,_a : str ):
'''simple docstring'''
A_ : Any = token.lower()
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : Any ,_a : int ):
'''simple docstring'''
return self.decoder.get(_a ,self.unk_token )
def _a ( self : Optional[int] ,_a : List[str] ):
'''simple docstring'''
A_ : List[str] = """ """.join(_a ).replace("""@@ """ ,"""""" ).strip()
return out_string
def _a ( self : str ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : str = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : List[Any] = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : str = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Tuple = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
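# A hedged usage sketch (assuming this class is transformers'
# BlenderbotSmallTokenizer behind the anonymised name):
#
#     tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     ids = tokenizer("hello world")["input_ids"]
#     tokenizer.decode(ids)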
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ):
A_ , A_ : int = coefficient_matrix.shape
A_ , A_ : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(lowerCamelCase)
    if colsb != 1:
        A_ : Tuple = F'Constant matrix must be nx1 but received {rowsb}x{colsb}'
        raise ValueError(lowerCamelCase)
    if rowsa != rowsb:
        A_ : Dict = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'received {rowsa}x{colsa} and {rowsb}x{colsb}'
)
raise ValueError(lowerCamelCase)
if len(lowerCamelCase) != rowsa:
A_ : Union[str, Any] = (
"""Number of initial values must be equal to number of rows in coefficient """
F'matrix but received {len(lowerCamelCase)} and {rowsa}'
)
raise ValueError(lowerCamelCase)
if iterations <= 0:
raise ValueError("""Iterations must be at least 1""")
A_ : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1)
A_ , A_ : int = table.shape
strictly_diagonally_dominant(lowerCamelCase)
# Iterates the whole matrix for given number of times
for _ in range(lowerCamelCase):
A_ : List[Any] = []
        for row in range(rows):
            A_ : int = 0
            for col in range(cols):
if col == row:
A_ : List[str] = table[row][col]
elif col == cols - 1:
A_ : str = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
A_ : Union[str, Any] = (temp + val) / denom
new_val.append(lowerCamelCase)
A_ : Tuple = new_val
    return [float(i) for i in new_val]
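# Worked example for the routine above:
#     coefficient = np.array([[4, 1, 1], [1, 5, 2], [1, 2, 4]])
#     constant = np.array([[2], [-6], [-4]])
#     init_val = [0.5, -0.5, -0.5]
# three Jacobi iterations return [0.909375, -1.14375, -0.7484375].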
def lowerCamelCase ( lowerCamelCase : NDArray[floataa]):
A_ , A_ : Dict = table.shape
A_ : Union[str, Any] = True
    for i in range(0 , rows):
A_ : str = 0
for j in range(0 , cols - 1):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
'configuration_maskformer': ['MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MaskFormerConfig'],
'configuration_maskformer_swin': ['MaskFormerSwinConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['MaskFormerFeatureExtractor']
__magic_name__ = ['MaskFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'MaskFormerForInstanceSegmentation',
'MaskFormerModel',
'MaskFormerPreTrainedModel',
]
__magic_name__ = [
'MaskFormerSwinBackbone',
'MaskFormerSwinModel',
'MaskFormerSwinPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str):
A_ : Any = len(lowerCamelCase)
A_ : Optional[Any] = len(lowerCamelCase)
A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)]
A_ : Union[str, Any] = True
    for i in range(n):
for j in range(m + 1):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
A_ : Optional[int] = True
if a[i].islower():
A_ : List[Any] = True
return dp[n][m]
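# Worked example: a = "daBcd", b = "ABC" -> True (drop the outer "d"s and
# capitalise "a" and "c"); a = "dBcd", b = "ABC" -> False ("B" cannot match "A"
# and, being uppercase, cannot be dropped).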
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int):
A_ : str = generate_pascal_triangle(lowerCamelCase)
for row_idx in range(lowerCamelCase):
# Print left spaces
for _ in range(num_rows - row_idx - 1):
print(end=""" """)
# Print row values
for col_idx in range(row_idx + 1):
if col_idx != row_idx:
print(triangle[row_idx][col_idx] , end=""" """)
else:
print(triangle[row_idx][col_idx] , end="""""")
print()
def lowerCamelCase ( lowerCamelCase : int):
if not isinstance(lowerCamelCase , lowerCamelCase):
raise TypeError("""The input value of 'num_rows' should be 'int'""")
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""")
A_ : list[list[int]] = []
for current_row_idx in range(lowerCamelCase):
A_ : Union[str, Any] = populate_current_row(lowerCamelCase , lowerCamelCase)
triangle.append(lowerCamelCase)
return triangle
def lowerCamelCase ( lowerCamelCase : list[list[int]] , lowerCamelCase : int):
A_ : int = [-1] * (current_row_idx + 1)
# first and last elements of current row are equal to 1
A_ , A_ : Union[str, Any] = 1, 1
for current_col_idx in range(1 , lowerCamelCase):
calculate_current_element(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
return current_row
def lowerCamelCase ( lowerCamelCase : list[list[int]] , lowerCamelCase : list[int] , lowerCamelCase : int , lowerCamelCase : int , ):
A_ : int = triangle[current_row_idx - 1][current_col_idx - 1]
A_ : Union[str, Any] = triangle[current_row_idx - 1][current_col_idx]
A_ : List[Any] = above_to_left_elt + above_to_right_elt
def lowerCamelCase ( lowerCamelCase : int):
if not isinstance(lowerCamelCase , lowerCamelCase):
raise TypeError("""The input value of 'num_rows' should be 'int'""")
if num_rows == 0:
return []
elif num_rows < 0:
raise ValueError(
"""The input value of 'num_rows' should be greater than or equal to 0""")
A_ : list[list[int]] = [[1]]
for row_index in range(1 , lowerCamelCase):
A_ : Dict = [0] + result[-1] + [0]
A_ : Optional[Any] = row_index + 1
# Calculate the number of distinct elements in a row
A_ : Any = sum(divmod(lowerCamelCase , 2))
A_ : Optional[Any] = [
temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1)
]
A_ : Dict = row_first_half[: (row_index + 1) // 2]
row_second_half.reverse()
A_ : Optional[int] = row_first_half + row_second_half
result.append(lowerCamelCase)
return result
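# Worked example: both implementations agree; for num_rows = 5 they return
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]].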
def lowerCamelCase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCamelCase : Callable , lowerCamelCase : int) -> None:
A_ : Tuple = F'{func.__name__}({value})'
A_ : List[Any] = timeit(F'__main__.{call}' , setup="""import __main__""")
# print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
print(F'{call:38} -- {timing:.4f} seconds')
for value in range(15): # (1, 7, 14):
for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
benchmark_a_function(lowerCamelCase , lowerCamelCase)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = 42
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : int ):
'''simple docstring'''
A_ : list[list[Edge]] = [[] for _ in range(_a )]
A_ : List[Any] = size
def __getitem__( self : int ,_a : int ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def _a ( self : str ):
'''simple docstring'''
return self._size
def _a ( self : str ,_a : int ,_a : int ,_a : int ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_a ,_a ) )
def _a ( self : Dict ,_a : int ,_a : int ):
'''simple docstring'''
A_ : Tuple = deque([start_vertex] )
A_ : list[int | None] = [None] * self.size
A_ : Union[str, Any] = 0
while queue:
A_ : List[Any] = queue.popleft()
A_ : Tuple = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
A_ : Union[str, Any] = current_distance + edge.weight
A_ : Optional[Any] = distances[edge.destination_vertex]
if (
isinstance(_a ,_a )
and new_distance >= dest_vertex_distance
):
continue
A_ : Tuple = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
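# A hedged usage sketch (method names follow the conventional 0-1 BFS adjacency
# list this anonymised class appears to mirror):
#
#     g = AdjacencyList(4)
#     g.add_edge(0, 1, 0)
#     g.add_edge(1, 2, 1)
#     g.add_edge(0, 3, 1)
#     g.add_edge(3, 2, 0)
#     g.distance_between(0, 2)   # -> 1 via either route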
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
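# A hedged quick-reference (the file name gradient_accumulation.py is
# illustrative, not taken from this dump):
#
#     accelerate launch gradient_accumulation.py --gradient_accumulation_steps 4
#     python gradient_accumulation.py --cpu --gradient_accumulation_steps 2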
__magic_name__ = 16
__magic_name__ = 32
def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16):
A_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-cased""")
A_ : Any = load_dataset("""glue""" , """mrpc""")
def tokenize_function(lowerCamelCase : Optional[Any]):
# max_length=None => use the model max length (it's actually the default)
A_ : Dict = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
A_ : Union[str, Any] = datasets.map(
lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A_ : Any = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(lowerCamelCase : Any):
# On TPU it's best to pad everything to the same length or training will be very slow.
A_ : Optional[int] = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we pad to a round multiple of 16 (fp8) or 8 (fp16/bf16)
if accelerator.mixed_precision == "fp8":
A_ : Optional[int] = 16
elif accelerator.mixed_precision != "no":
A_ : Tuple = 8
else:
A_ : List[Any] = None
return tokenizer.pad(
lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
A_ : Optional[int] = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase)
A_ : Dict = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase)
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__magic_name__ = mocked_dataloaders # noqa: F811
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Any):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowerCamelCase) == "1":
A_ : Any = 2
# New Code #
A_ : Any = int(args.gradient_accumulation_steps)
# Initialize accelerator
A_ : str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowerCamelCase)
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"""Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`""")
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A_ : Union[str, Any] = config["""lr"""]
A_ : Union[str, Any] = int(config["""num_epochs"""])
A_ : str = int(config["""seed"""])
A_ : Any = int(config["""batch_size"""])
A_ : Tuple = evaluate.load("""glue""" , """mrpc""")
set_seed(lowerCamelCase)
A_ , A_ : int = get_dataloaders(lowerCamelCase , lowerCamelCase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A_ : str = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
A_ : List[Any] = model.to(accelerator.device)
# Instantiate optimizer
A_ : Tuple = AdamW(params=model.parameters() , lr=lowerCamelCase)
# Instantiate scheduler
A_ : str = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A_ , A_ , A_ , A_ , A_ : str = accelerator.prepare(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Now we train the model
for epoch in range(lowerCamelCase):
model.train()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowerCamelCase):
A_ : Optional[Any] = model(**lowerCamelCase)
A_ : Any = output.loss
accelerator.backward(lowerCamelCase)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
A_ : str = model(**lowerCamelCase)
A_ : Union[str, Any] = outputs.logits.argmax(dim=-1)
A_ , A_ : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=lowerCamelCase , references=lowerCamelCase , )
A_ : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'epoch {epoch}:' , lowerCamelCase)
def lowerCamelCase ( ):
A_ : Optional[Any] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCamelCase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
A_ : Tuple = parser.parse_args()
A_ : List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCamelCase , lowerCamelCase)
if __name__ == "__main__":
main()
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int = 10**9):
A_ : Optional[int] = 1
A_ : int = 2
A_ : List[Any] = 0
A_ : Optional[Any] = 0
A_ : str = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
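# The loop above walks a Pell-style recurrence; the perimeters it accumulates
# begin 16, 50, 196, ... which (reading this as the usual Project Euler 94
# solution) correspond to the almost-equilateral triangles (5, 5, 6),
# (17, 17, 16), (65, 65, 66), ...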
if __name__ == "__main__":
print(f"""{solution() = }""")
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] ,*_a : List[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def lowerCamelCase ( ):
A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase)
A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""")
# Register commands
get_config_parser(subparsers=lowerCamelCase)
env_command_parser(subparsers=lowerCamelCase)
launch_command_parser(subparsers=lowerCamelCase)
tpu_command_parser(subparsers=lowerCamelCase)
test_command_parser(subparsers=lowerCamelCase)
# Let's go
A_ : Dict = parser.parse_args()
if not hasattr(lowerCamelCase , """func"""):
parser.print_help()
exit(1)
# Run
args.func(lowerCamelCase)
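# The dispatcher above backs the `accelerate` console entry point, e.g.:
#
#     accelerate config      # interactive configuration
#     accelerate env         # environment report
#     accelerate launch train.py    # train.py and its flags are illustrative
#     accelerate test        # sanity-check the saved config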
if __name__ == "__main__":
main()
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __lowerCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,_a : Optional[int] ,_a : str=1 ,_a : Dict=False ,**_a : int ):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = vocab_size
A_ : List[Any] = d_embed
A_ : Optional[Any] = d_proj
A_ : Any = cutoffs + [vocab_size]
A_ : Optional[int] = [0] + self.cutoffs
A_ : List[str] = div_val
A_ : Optional[int] = self.cutoffs[0]
A_ : Dict = len(self.cutoffs ) - 1
A_ : str = self.shortlist_size + self.n_clusters
A_ : Union[str, Any] = keep_order
A_ : List[str] = []
A_ : Tuple = []
def _a ( self : Dict ,_a : Dict ):
'''simple docstring'''
if self.n_clusters > 0:
A_ : Optional[Any] = self.add_weight(
shape=(self.n_clusters, self.d_embed) ,initializer="""zeros""" ,trainable=_a ,name="""cluster_weight""" )
A_ : str = self.add_weight(
shape=(self.n_clusters,) ,initializer="""zeros""" ,trainable=_a ,name="""cluster_bias""" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
A_ : int = self.add_weight(
shape=(self.d_embed, self.d_proj) ,initializer="""zeros""" ,trainable=_a ,name=f'out_projs_._{i}' ,)
self.out_projs.append(_a )
else:
self.out_projs.append(_a )
A_ : Union[str, Any] = self.add_weight(
shape=(self.vocab_size, self.d_embed) ,initializer="""zeros""" ,trainable=_a ,name=f'out_layers_._{i}_._weight' ,)
A_ : List[str] = self.add_weight(
shape=(self.vocab_size,) ,initializer="""zeros""" ,trainable=_a ,name=f'out_layers_._{i}_._bias' ,)
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
A_ , A_ : Any = self.cutoff_ends[i], self.cutoff_ends[i + 1]
A_ : Optional[Any] = self.d_embed // (self.div_val**i)
A_ : Optional[Any] = self.add_weight(
shape=(d_emb_i, self.d_proj) ,initializer="""zeros""" ,trainable=_a ,name=f'out_projs_._{i}' )
self.out_projs.append(_a )
A_ : List[Any] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) ,initializer="""zeros""" ,trainable=_a ,name=f'out_layers_._{i}_._weight' ,)
A_ : Any = self.add_weight(
shape=(r_idx - l_idx,) ,initializer="""zeros""" ,trainable=_a ,name=f'out_layers_._{i}_._bias' ,)
self.out_layers.append((weight, bias) )
super().build(_a )
@staticmethod
def _a ( _a : Any ,_a : List[str] ,_a : str ,_a : Union[str, Any]=None ):
'''simple docstring'''
A_ : int = x
if proj is not None:
A_ : Any = tf.einsum("""ibd,ed->ibe""" ,_a ,_a )
return tf.einsum("""ibd,nd->ibn""" ,_a ,_a ) + b
@staticmethod
def _a ( _a : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = shape_list(_a )
A_ : Optional[Any] = tf.range(lp_size[0] ,dtype=target.dtype )
A_ : Union[str, Any] = tf.stack([r, target] ,1 )
return tf.gather_nd(_a ,_a )
def _a ( self : str ,_a : List[Any] ,_a : str ,_a : str=True ,_a : str=False ):
'''simple docstring'''
A_ : Union[str, Any] = 0
if self.n_clusters == 0:
A_ : Any = self._logit(_a ,self.out_layers[0][0] ,self.out_layers[0][1] ,self.out_projs[0] )
if target is not None:
A_ : Union[str, Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=_a ,logits=_a )
A_ : Tuple = tf.nn.log_softmax(_a ,axis=-1 )
else:
A_ : int = shape_list(_a )
A_ : int = []
A_ : Optional[int] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
A_ , A_ : int = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
A_ : List[str] = (target >= l_idx) & (target < r_idx)
A_ : int = tf.where(_a )
A_ : Union[str, Any] = tf.boolean_mask(_a ,_a ) - l_idx
if self.div_val == 1:
A_ : Dict = self.out_layers[0][0][l_idx:r_idx]
A_ : str = self.out_layers[0][1][l_idx:r_idx]
else:
A_ : int = self.out_layers[i][0]
A_ : List[str] = self.out_layers[i][1]
if i == 0:
A_ : str = tf.concat([cur_W, self.cluster_weight] ,0 )
A_ : Optional[int] = tf.concat([cur_b, self.cluster_bias] ,0 )
A_ : List[Any] = self._logit(_a ,_a ,_a ,self.out_projs[0] )
A_ : List[str] = tf.nn.log_softmax(_a )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
A_ : str = tf.boolean_mask(_a ,_a )
A_ : Optional[int] = self._gather_logprob(_a ,_a )
else:
A_ : Optional[Any] = self._logit(_a ,_a ,_a ,self.out_projs[i] )
A_ : Optional[Any] = tf.nn.log_softmax(_a )
A_ : Optional[int] = self.cutoffs[0] + i - 1 # No probability for the head cluster
A_ : Optional[Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(_a )
if target is not None:
A_ : Tuple = tf.boolean_mask(_a ,_a )
A_ : int = tf.boolean_mask(_a ,_a )
A_ : Tuple = self._gather_logprob(_a ,_a )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(_a ,-cur_logprob ,shape_list(_a ) )
A_ : Optional[int] = tf.concat(_a ,axis=-1 )
if target is not None:
if return_mean:
A_ : Optional[Any] = tf.reduce_mean(_a )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(_a )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(_a ,name=self.name ,aggregation="""mean""" if return_mean else """""" )
return out
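# A hedged construction sketch (assuming this layer matches transformers'
# TFAdaptiveSoftmaxMask; the hyper-parameters are the usual Transfo-XL /
# WikiText-103 settings, not values taken from this dump):
#
#     softmax = TFAdaptiveSoftmaxMask(
#         vocab_size=267_735, d_embed=1_024, d_proj=1_024,
#         cutoffs=[20_000, 40_000, 200_000], div_val=4,
#     )
#     # training-time call: softmax(hidden, target, return_mean=True)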
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
'configuration_altclip': [
'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AltCLIPConfig',
'AltCLIPTextConfig',
'AltCLIPVisionConfig',
],
'processing_altclip': ['AltCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'AltCLIPPreTrainedModel',
'AltCLIPModel',
'AltCLIPTextModel',
'AltCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)