| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 to 1) |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and the corresponding lengths.

    Input:
    ------
        params: `NameSpace` parameters
        data: `List[np.array[int]]`
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """
        Some sanity checks.
        """
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """
        Sequences that are too long are split by chunks of max_model_input_size.
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """
        Too short sequences are simply removed. This could be tuned.
        """
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """
        Remove sequences with a (too) high level of unknown tokens.
        """
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """
        Print some statistics on the corpus. Only the master process.
        """
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """
        Do the padding and transform into torch.tensor.
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
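
# Hedged usage sketch (added for illustration, not part of the original file): the
# `params` namespace below only fakes the fields this class reads, and
# `batch_sequences` is passed as the DataLoader's collate_fn. Running it still
# requires the repo's `utils` module for `logger`.
if __name__ == "__main__":
    from argparse import Namespace

    from torch.utils.data import DataLoader

    params = Namespace(
        max_model_input_size=512,
        mlm=True,
        special_tok_ids={"cls_token": 0, "sep_token": 1, "pad_token": 2, "unk_token": 3},
        is_master=True,
    )
    # Toy sequences: each starts with cls_id (0), ends with sep_id (1), and is >11 tokens long
    data = [np.array([0, *range(4, 16), 1]), np.array([0, *range(20, 32), 1])]
    dataset = LmSeqsDataset(params, data)
    loader = DataLoader(dataset, batch_size=2, collate_fn=dataset.batch_sequences)
    for token_ids, lengths in loader:
        print(token_ids.shape, lengths)  # token_ids: (bs, max_seq_len_), lengths: (bs,)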
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights to our MLM structure."""
    # Instantiate the authors' model with the pre-trained weights
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--bertabs_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()

    convert_bertabs_checkpoints(
        args.bertabs_checkpoint_path,
        args.pytorch_dump_folder_path,
    )
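
# Illustrative invocation (added; the script filename and checkpoint path are
# assumptions, not taken from this file):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path ./bertabs_cnndm_final.pt \
#       --pytorch_dump_folder_path ./converted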
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": "swin2SR-classical-sr-x2-64",
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": "swin2SR-classical-sr-x4-64",
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": "swin2SR-compressed-sr-x4-48",
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": "swin2SR-lightweight-x2-64",
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": "swin2SR-realworld-sr-x4-64-bsrgan-psnr",
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")

    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
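
# Illustrative invocation (added; the script filename is assumed): with no
# --checkpoint_url, the default ClassicalSR x2 checkpoint is downloaded, converted,
# and verified against the hard-coded expected slices above.
#
#   python convert_swin2sr_original_to_pytorch.py --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64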
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the distinct prime factors of an integer by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check if all elements in a list are identical (or the list is empty)."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n distinct prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the first run of n consecutive integers
    that each have n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
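
# Illustrative sanity checks (added; the values follow from the problem definition):
#   solution(2) == 14   # 14 = 2*7 and 15 = 3*5 are the first such consecutive pair
#   solution(3) == 644  # 644 = 2^2*7*23, 645 = 3*5*43, 646 = 2*17*19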
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
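
# Illustrative invocation (added; the path is the conventional location of this
# test in the transformers repo, not stated in the file itself):
#   pytest tests/models/ctrl/test_tokenization_ctrl.py -k test_full_tokenizer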
'''simple docstring'''
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n) -> float:
    """Greedy solution to the fractional knapsack problem: sort items by
    value/weight ratio, take whole items while they fit, then a fraction of the next."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
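
# Worked example (added for illustration):
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) -> 240.0
# Ratios sorted descending are 6, 5, 4; the first two items fit whole (weight 30,
# value 160) and 20/30 of the third item adds 80, giving 240 in total.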
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
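
# Illustrative note (added): with the _LazyModule registration above, importing this
# package stays cheap; the torch-backed modeling code is only loaded on first access:
#
#   from transformers.models.autoformer import AutoformerConfig  # no torch import yet
#   from transformers.models.autoformer import AutoformerModel   # triggers the heavy import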
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    r"""
    Constructs a BLIP-2 processor which wraps an image processor and a tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
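
# Hedged usage sketch (added; the checkpoint name and image path are illustrative,
# not taken from this file):
#
#   from PIL import Image
#   from transformers import Blip2Processor
#
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   # inputs now contains pixel_values plus the tokenized text fields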
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
        "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/fnet-base": 512,
    "google/fnet-large": 512,
}

SPIECE_UNDERLINE = "▁"
class FNetTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" FNet tokenizer, backed by HuggingFace's tokenizers library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
                 keep_accents=True, unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]",
                 mask_token="[MASK]", **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token,
            cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
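
# Hedged usage sketch (added; "google/fnet-base" appears in the URL maps above):
#
#   tokenizer = FNetTokenizerFast.from_pretrained("google/fnet-base")
#   enc = tokenizer("Hello world", return_tensors="pt")
#   # per model_input_names above, enc holds input_ids and token_type_ids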
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    """Configuration class to store the configuration of an XGLM model."""

    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096,
                 num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1,
                 attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02,
                 scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1,
                 bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
                         decoder_start_token_id=decoder_start_token_id, **kwargs)
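
# Hedged usage sketch (added; the tiny sizes below are illustrative, chosen for a
# quick smoke test rather than taken from any released checkpoint):
#
#   config = XGLMConfig(vocab_size=1000, d_model=64, ffn_dim=128, num_layers=2, attention_heads=4)
#   from transformers import XGLMModel
#   model = XGLMModel(config)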
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
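
# Worked example (added): for n = 10 the Fibonacci terms not exceeding 10 are
# 0, 1, 1, 2, 3, 5, 8, so solution(10) == 2 + 8 == 10. For the default limit of
# 4,000,000 the sum of the even-valued terms is 4613732.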
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
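    # Added illustration (not in the original script): report the prediction error
    # on the held-out window; plain MSE in scaled units is an assumed metric choice.
    test_mse = np.mean((pred - y_test) ** 2)
    print(f"Test MSE (scaled units): {test_mse:.6f}")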
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. "s3://") from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Return True if `fs` points to a remote (non-local) filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src, dst):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
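
# Hedged usage examples (added for illustration):
#
#   extract_path_from_uri("s3://my-bucket/datasets/train")  # -> "my-bucket/datasets/train"
#   extract_path_from_uri("/local/path/train")              # -> unchanged local path
#
# is_remote_filesystem returns False for a filesystem whose protocol is "file",
# and True for e.g. an S3-backed fsspec filesystem.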
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7,
                 is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99,
                 hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6,
                 shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, coordinate_size=self.coordinate_size,
            shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask,
            token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
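
# Illustrative invocation (added; RUN_SLOW gates the @slow tests in the transformers
# test suite, and the path is the conventional location of this file):
#
#   RUN_SLOW=1 pytest tests/models/layoutlmv3/test_modeling_layoutlmv3.py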
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length: int = 8) -> str:
    """Generate a random password of the specified length."""
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars) for _ in range(length))


def alternative_password_generator(chars_incl: str, i: int) -> str:
    # Password generated with the user-specified characters included
    i -= len(chars_incl)
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    #     random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters, quotient + remainder)
        + random(digits, quotient)
        + random(punctuation, quotient)
    )
    list_of_chars = list(chars)
    shuffle(list_of_chars)
    return "".join(list_of_chars)


# random is a generalised function for letters, characters and numbers
def random(chars_incl: str, i: int) -> str:
    return "".join(secrets.choice(chars_incl) for _ in range(i))


def random_number(chars_incl, i):
    pass  # Put your code here...


def random_letters(chars_incl, i):
    pass  # Put your code here...


def random_characters(chars_incl, i):
    pass  # Put your code here...


def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        # Your Password must be at least 8 characters long
        return False

    upper = any(char in ascii_uppercase for char in password)
    lower = any(char in ascii_lowercase for char in password)
    num = any(char in digits for char in password)
    spec_char = any(char in punctuation for char in password)

    return upper and lower and num and spec_char


# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters


def main():
    length = int(input("Please indicate the max length of your password: ").strip())
    chars_incl = input("Please indicate the characters that must be in your password: ").strip()
    print("Password generated:", password_generator(length))
    print(
        "Alternative Password generated:",
        alternative_password_generator(chars_incl, length),
    )
    print("[If you are thinking of using this password, you had better save it.]")


if __name__ == "__main__":
    main()
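
# Illustrative checks (added):
#   is_strong_password("Ab3$efgh")  # True: upper, lower, digit, punctuation, length 8
#   is_strong_password("abcdefgh")  # False: no uppercase, digit, or special character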
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip

logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
snake_case_ : int = args.k
snake_case_ : int = [line.strip() for line in open(__UpperCamelCase , """r""" ).readlines()]
snake_case_ : Dict = [line.strip() for line in open(__UpperCamelCase , """r""" ).readlines()]
snake_case_ : Any = 0
for hypo, reference in zip(__UpperCamelCase , __UpperCamelCase ):
snake_case_ : Dict = set(hypo.split("""\t""" )[:k] )
snake_case_ : List[Any] = set(reference.split("""\t""" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
snake_case_ : Dict = 100.0 * em / total
logger.info(F'Precision@{k}: {em: .2f}' )
def __lowerCAmelCase ( __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
def strip_title(__UpperCamelCase : str ):
if title.startswith("""\"""" ):
snake_case_ : Union[str, Any] = title[1:]
if title.endswith("""\"""" ):
snake_case_ : Union[str, Any] = title[:-1]
return title
snake_case_ : Tuple = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase , truncation=__UpperCamelCase , )["""input_ids"""].to(args.device )
snake_case_ : int = rag_model.rag.question_encoder(__UpperCamelCase )
snake_case_ : Union[str, Any] = question_enc_outputs[0]
snake_case_ : Dict = rag_model.retriever(
__UpperCamelCase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , )
snake_case_ : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
snake_case_ : Optional[Any] = []
for docs in all_docs:
snake_case_ : List[Any] = [strip_title(__UpperCamelCase ) for title in docs["""title"""]]
provenance_strings.append("""\t""".join(__UpperCamelCase ) )
return provenance_strings
def __lowerCAmelCase ( __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ):
'''simple docstring'''
with torch.no_grad():
snake_case_ : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase , truncation=__UpperCamelCase )
snake_case_ : List[Any] = inputs_dict.input_ids.to(args.device )
snake_case_ : Optional[int] = inputs_dict.attention_mask.to(args.device )
snake_case_ : Any = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase , attention_mask=__UpperCamelCase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__UpperCamelCase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
snake_case_ : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase , __UpperCamelCase ):
logger.info("""Q: {} - A: {}""".format(__UpperCamelCase , __UpperCamelCase ) )
return answers
def __lowerCAmelCase ( ):
'''simple docstring'''
snake_case_ : Any = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__UpperCamelCase , help=(
"""RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
""" model_name_or_path"""
) , )
parser.add_argument(
"""--index_name""" , default=__UpperCamelCase , choices=["""exact""", """compressed""", """legacy"""] , type=__UpperCamelCase , help="""RAG model retriever type""" , )
parser.add_argument(
"""--index_path""" , default=__UpperCamelCase , type=__UpperCamelCase , help="""Path to the retrieval index""" , )
parser.add_argument("""--n_docs""" , default=5 , type=__UpperCamelCase , help="""Number of retrieved docs""" )
parser.add_argument(
"""--model_name_or_path""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__UpperCamelCase , help=(
"""Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
""" precision@k."""
) , )
parser.add_argument("""--k""" , default=1 , type=__UpperCamelCase , help="""k for the precision@k calculation""" )
parser.add_argument(
"""--evaluation_set""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to a file containing evaluation samples""" , )
parser.add_argument(
"""--gold_data_path""" , default=__UpperCamelCase , type=__UpperCamelCase , required=__UpperCamelCase , help="""Path to a tab-separated file with gold samples""" , )
parser.add_argument(
"""--gold_data_mode""" , default="""qa""" , type=__UpperCamelCase , choices=["""qa""", """ans"""] , help=(
"""Format of the gold data file"""
"""qa - a single line in the following format: question [tab] answer_list"""
"""ans - a single line of the gold file contains the expected answer string"""
) , )
parser.add_argument(
"""--predictions_path""" , type=__UpperCamelCase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , )
parser.add_argument(
"""--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , )
parser.add_argument(
"""--eval_batch_size""" , default=8 , type=__UpperCamelCase , help="""Batch size per GPU/CPU for evaluation.""" , )
parser.add_argument(
"""--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , )
parser.add_argument(
"""--num_beams""" , default=4 , type=__UpperCamelCase , help="""Number of beams to be used when generating answers""" , )
parser.add_argument("""--min_length""" , default=1 , type=__UpperCamelCase , help="""Min length of the generated answers""" )
parser.add_argument("""--max_length""" , default=5_0 , type=__UpperCamelCase , help="""Max length of the generated answers""" )
parser.add_argument(
"""--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , )
parser.add_argument(
"""--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , )
snake_case_ : Dict = parser.parse_args()
snake_case_ : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
return args
def __lowerCAmelCase ( __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Optional[Any] = {}
if args.model_type is None:
snake_case_ : Any = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("""rag""" ):
snake_case_ : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
snake_case_ : Tuple = args.n_docs
if args.index_name is not None:
snake_case_ : Tuple = args.index_name
if args.index_path is not None:
snake_case_ : Any = args.index_path
else:
snake_case_ : Optional[Any] = BartForConditionalGeneration
snake_case_ : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("""Evaluate the following checkpoints: %s""" , __UpperCamelCase )
snake_case_ : Union[str, Any] = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
snake_case_ : int = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) )
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
continue
logger.info("""***** Running evaluation for {} *****""".format(__UpperCamelCase ) )
logger.info(""" Batch size = %d""" , args.eval_batch_size )
logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) )
if args.model_type.startswith("""rag""" ):
snake_case_ : Any = RagRetriever.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
snake_case_ : Any = model_class.from_pretrained(__UpperCamelCase , retriever=__UpperCamelCase , **__UpperCamelCase )
model.retriever.init_retrieval()
else:
snake_case_ : int = model_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
model.to(args.device )
with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file:
snake_case_ : List[Any] = []
for line in tqdm(__UpperCamelCase ):
questions.append(line.strip() )
if len(__UpperCamelCase ) == args.eval_batch_size:
snake_case_ : Tuple = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write("""\n""".join(__UpperCamelCase ) + """\n""" )
preds_file.flush()
snake_case_ : Dict = []
if len(__UpperCamelCase ) > 0:
snake_case_ : List[str] = evaluate_batch_fn(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
preds_file.write("""\n""".join(__UpperCamelCase ) )
preds_file.flush()
score_fn(__UpperCamelCase , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = get_args()
main(args)
| 58
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->Dict:
'''simple docstring'''
a : Any = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
a : str = True if "large" in model_name or "huge" in model_name else False
a : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
a : Dict = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
a : Union[str, Any] = [3, 3, 3, 3]
a : List[str] = [5, 5, 5, 5]
elif "fl4" in model_name:
a : Any = [4, 4, 4, 4]
a : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
a : Dict = [3, 3, 3, 3]
if "lrf" in model_name:
a : Optional[int] = [3, 3, 3, 3]
else:
a : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
a : List[str] = 96
elif "small" in model_name:
a : Union[str, Any] = 96
elif "base" in model_name:
a : Dict = 128
elif "large" in model_name:
a : Union[str, Any] = 192
elif "xlarge" in model_name:
a : Tuple = 256
elif "huge" in model_name:
a : List[str] = 352
# set label information
a : List[Any] = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
a : Optional[int] = "imagenet-22k-id2label.json"
else:
a : List[str] = "imagenet-1k-id2label.json"
a : Optional[int] = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="dataset" ) , "r" ) )
a : str = {int(_lowercase ): v for k, v in idalabel.items()}
a : List[str] = {v: k for k, v in idalabel.items()}
a : Dict = FocalNetConfig(
embed_dim=_lowercase , depths=_lowercase , focal_levels=_lowercase , focal_windows=_lowercase , use_conv_embed=_lowercase , idalabel=_lowercase , labelaid=_lowercase , use_post_layernorm=_lowercase , use_layerscale=_lowercase , )
return config
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->List[Any]:
'''simple docstring'''
if "patch_embed.proj" in name:
a : Any = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a : List[str] = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
a : List[Any] = "encoder." + name
if "encoder.layers" in name:
a : int = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
a : Any = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
a : str = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
a : Union[str, Any] = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
a : Dict = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
a : Any = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
a : str = "layernorm.weight"
if name == "norm.bias":
a : Optional[Any] = "layernorm.bias"
if "head" in name:
a : Tuple = name.replace("head" , "classifier" )
else:
a : int = "focalnet." + name
return name
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : Optional[Any] , _lowercase : Tuple=False ) ->str:
'''simple docstring'''
a : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
a : str = model_name_to_url[model_name]
print("Checkpoint URL: " , _lowercase )
a : Any = torch.hub.load_state_dict_from_url(_lowercase , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
a : Any = state_dict.pop(_lowercase )
a : Any = val
a : Any = get_focalnet_config(_lowercase )
a : Optional[int] = FocalNetForImageClassification(_lowercase )
model.eval()
# load state dict
model.load_state_dict(_lowercase )
# verify conversion
a : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
a : Optional[int] = BitImageProcessor(
do_resize=_lowercase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_lowercase , crop_size=224 , do_normalize=_lowercase , image_mean=_lowercase , image_std=_lowercase , )
a : int = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
a : Dict = processor(images=_lowercase , return_tensors="pt" )
a : Optional[int] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
a : str = image_transforms(_lowercase ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _lowercase , atol=1E-4 )
a : Dict = model(**_lowercase )
a : List[str] = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
a : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
a : Union[str, Any] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
a : Dict = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
a : Dict = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
a : Any = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
a : str = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
a : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
a : Tuple = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 633
| 0
|
from __future__ import annotations
from collections import Counter
from random import random
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] ) -> Dict:
"""simple docstring"""
__magic_name__ = {}
def _lowercase ( self : Dict , UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__magic_name__ = {}
def _lowercase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ) -> None:
"""simple docstring"""
if nodea not in self.connections:
self.add_node(UpperCamelCase__ )
if nodea not in self.connections:
self.add_node(UpperCamelCase__ )
__magic_name__ = probability
def _lowercase ( self : Optional[Any] ) -> list[str]:
"""simple docstring"""
return list(self.connections )
def _lowercase ( self : Any , UpperCamelCase__ : str ) -> str:
"""simple docstring"""
__magic_name__ = 0
__magic_name__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(A_, A_, A_ )
__magic_name__ = Counter(graph.get_nodes() )
__magic_name__ = start
for _ in range(A_ ):
__magic_name__ = graph.transition(A_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( A_, A_ ):
'''simple docstring'''
assert isinstance(A_, A_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read()
_check_text_dataset(A_, A_ )
@pytest.mark.parametrize(
"""features""", [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
], )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read()
_check_text_dataset(A_, A_ )
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read()
_check_text_dataset(A_, A_ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""", [str, list] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
if issubclass(A_, A_ ):
__magic_name__ = text_path
elif issubclass(A_, A_ ):
__magic_name__ = [text_path]
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read()
_check_text_dataset(A_, A_ )
def a__ ( A_, A_, A_=("train",) ):
'''simple docstring'''
assert isinstance(A_, A_ )
for split in splits:
__magic_name__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""", [False, True] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read()
_check_text_datasetdict(A_, A_ )
@pytest.mark.parametrize(
"""features""", [
None,
{"""text""": """string"""},
{"""text""": """int32"""},
{"""text""": """float32"""},
], )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
__magic_name__ = tmp_path / """cache"""
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
__magic_name__ = {"""text""": """string"""}
__magic_name__ = features.copy() if features else default_expected_features
__magic_name__ = (
Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None
)
__magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read()
_check_text_datasetdict(A_, A_ )
@pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] )
def a__ ( A_, A_, A_ ):
'''simple docstring'''
if split:
__magic_name__ = {split: text_path}
else:
__magic_name__ = """train"""
__magic_name__ = {"""train""": text_path, """test""": text_path}
__magic_name__ = tmp_path / """cache"""
__magic_name__ = {"""text""": """string"""}
__magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read()
_check_text_datasetdict(A_, A_, splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 76
| 0
|
def __UpperCAmelCase ( a_):
return 10 - x * x
def __UpperCAmelCase ( a_ , a_):
if equation(_snake_case) * equation(_snake_case) >= 0:
raise ValueError('Wrong space!')
snake_case_ = a
while (b - a) >= 0.01:
# Find middle point
snake_case_ = (a + b) / 2
# Check if middle point is root
if equation(_snake_case) == 0.0:
break
# Decide the side to repeat the steps
if equation(_snake_case) * equation(_snake_case) < 0:
snake_case_ = c
else:
snake_case_ = c
return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
| 198
|
"""simple docstring"""
import argparse
import json
import subprocess
def snake_case__ ( _snake_case : str , _snake_case : Any ):
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_snake_case , shell=_snake_case , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_snake_case )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_snake_case )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_snake_case ) )
if len(_snake_case ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def snake_case__ ( _snake_case : Any ):
"""simple docstring"""
return values.split("," )
A : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
A : List[Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 516
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : Optional[int] = {
'''configuration_altclip''': [
'''ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''AltCLIPConfig''',
'''AltCLIPTextConfig''',
'''AltCLIPVisionConfig''',
],
'''processing_altclip''': ['''AltCLIPProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
'''ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AltCLIPPreTrainedModel''',
'''AltCLIPModel''',
'''AltCLIPTextModel''',
'''AltCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowerCamelCase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ ( lowerCAmelCase_ : Dict ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase ( __a ):
'''simple docstring'''
@staticmethod
def lowerCAmelCase ( __a : ArgumentParser ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=__a , default=__a , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=__a , help="""Name of the model to download""" )
download_parser.set_defaults(func=__a )
def __init__( self : Dict , __a : str , __a : str , __a : bool , __a : bool ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Dict = model
__lowercase : List[Any] = cache
__lowercase : Any = force
__lowercase : Optional[int] = trust_remote_code
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 649
| 0
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _UpperCamelCase ( __A ):
'''simple docstring'''
def __UpperCamelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(a , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(a , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(a , "num_attention_heads" ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int , a : str , a : Union[str, Any]=13 , a : Dict=32 , a : Optional[Any]=2 , a : str=3 , a : Optional[Any]=640 , a : List[str]=4 , a : Optional[int]="silu" , a : Optional[int]=3 , a : str=32 , a : List[Any]=0.1 , a : Optional[Any]=0.1 , a : Optional[int]=0.1 , a : Any=0.02 , a : int=True , a : Dict=True , a : Dict=10 , a : Optional[int]=None , ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = parent
SCREAMING_SNAKE_CASE : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE : Any = image_size
SCREAMING_SNAKE_CASE : Dict = patch_size
SCREAMING_SNAKE_CASE : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE : int = last_hidden_size
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Tuple = conv_kernel_size
SCREAMING_SNAKE_CASE : List[Any] = output_stride
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = classifier_dropout_prob
SCREAMING_SNAKE_CASE : Optional[int] = use_labels
SCREAMING_SNAKE_CASE : Optional[int] = is_training
SCREAMING_SNAKE_CASE : Any = num_labels
SCREAMING_SNAKE_CASE : Dict = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = scope
def __UpperCamelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self : Optional[Any] , a : Optional[Any] , a : Optional[int] , a : Optional[int] , a : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTModel(config=a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self : Any , a : Tuple , a : Any , a : List[Any] , a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForImageClassification(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Optional[int] , a : List[str] , a : Tuple , a : Optional[int] , a : Dict ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTForSemanticSegmentation(a )
model.to(a )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
SCREAMING_SNAKE_CASE : Tuple = model(a , labels=a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _UpperCamelCase ( __A , __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =(
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase__ =(
{
'feature-extraction': MobileViTModel,
'image-classification': MobileViTForImageClassification,
'image-segmentation': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
lowerCamelCase__ =False
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTModelTester(self )
SCREAMING_SNAKE_CASE : Tuple = MobileViTConfigTester(self , config_class=a , has_text_modality=a )
def __UpperCamelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def __UpperCamelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(a )
SCREAMING_SNAKE_CASE : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Dict = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(a : Tuple , a : Tuple , a : Optional[Any] ):
SCREAMING_SNAKE_CASE : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**self._prepare_for_class(a , a ) )
SCREAMING_SNAKE_CASE : List[str] = outputs.hidden_states
SCREAMING_SNAKE_CASE : Optional[Any] = 5
self.assertEqual(len(a ) , a )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
SCREAMING_SNAKE_CASE : Union[str, Any] = 2
for i in range(len(a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Dict = True
check_hidden_states_output(a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(a , a , a )
def __UpperCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def __UpperCamelCase ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*a )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : List[Any] = MobileViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def lowerCamelCase__ ( ):
SCREAMING_SNAKE_CASE : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(a )
SCREAMING_SNAKE_CASE : Dict = self.default_image_processor
SCREAMING_SNAKE_CASE : List[Any] = prepare_img()
SCREAMING_SNAKE_CASE : str = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(**a )
# verify the logits
SCREAMING_SNAKE_CASE : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , a )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
SCREAMING_SNAKE_CASE : Optional[int] = model.to(a )
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
SCREAMING_SNAKE_CASE : Tuple = prepare_img()
SCREAMING_SNAKE_CASE : Tuple = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**a )
SCREAMING_SNAKE_CASE : Optional[int] = outputs.logits
# verify the logits
SCREAMING_SNAKE_CASE : List[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , a )
SCREAMING_SNAKE_CASE : Any = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a , atol=1e-4 ) )
@slow
def __UpperCamelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
SCREAMING_SNAKE_CASE : List[str] = model.to(a )
SCREAMING_SNAKE_CASE : Dict = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
SCREAMING_SNAKE_CASE : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=a , return_tensors="pt" ).to(a )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[int] = model(**a )
SCREAMING_SNAKE_CASE : int = outputs.logits.detach().cpu()
SCREAMING_SNAKE_CASE : int = image_processor.post_process_semantic_segmentation(outputs=a , target_sizes=[(50, 60)] )
SCREAMING_SNAKE_CASE : List[str] = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , a )
SCREAMING_SNAKE_CASE : int = image_processor.post_process_semantic_segmentation(outputs=a )
SCREAMING_SNAKE_CASE : Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , a )
| 25
|
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __lowerCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
_A = 42
_A = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 266
| 0
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class A__ ( A__ ):
"""simple docstring"""
def _UpperCamelCase( self : str , lowerCamelCase__ : str ):
with open(lowerCamelCase__ , encoding="utf-8" ) as input_file:
a__ : List[Any] = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
a__ : int = input_file.read()
a__ : Optional[Any] = regexp.search(lowerCamelCase__ )
return match
def _UpperCamelCase( self : List[str] , lowerCamelCase__ : str ):
with open(lowerCamelCase__ , encoding="utf-8" ) as input_file:
a__ : Optional[int] = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
a__ : Optional[Any] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
a__ : Optional[int] = regexp.finditer(lowerCamelCase__ )
a__ : str = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _UpperCamelCase( self : Optional[Any] ):
a__ : List[str] = Path("./datasets" )
a__ : List[Any] = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCamelCase__ ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def _UpperCamelCase( self : Optional[Any] ):
a__ : str = Path("./datasets" )
a__ : Tuple = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCamelCase__ ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 151
|
def UpperCamelCase_ ( __a = 3 , __a = 7 , __a = 1_000_000 ) -> int:
a__ : List[Any] = 0
a__ : int = 1
for current_denominator in range(1 , limit + 1 ):
a__ : Optional[Any] = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
a__ : int = current_numerator
a__ : Dict = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
| 151
| 1
|
"""simple docstring"""
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = None
lowerCAmelCase__ :Any = None
lowerCAmelCase__ :Any = graph
self._normalize_graph(__UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ :int = len(__UpperCAmelCase )
lowerCAmelCase__ :Optional[int] = None
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if sources is int:
lowerCAmelCase__ :List[Any] = [sources]
if sinks is int:
lowerCAmelCase__ :int = [sinks]
if len(__UpperCAmelCase ) == 0 or len(__UpperCAmelCase ) == 0:
return
lowerCAmelCase__ :List[str] = sources[0]
lowerCAmelCase__ :str = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__UpperCAmelCase ) > 1 or len(__UpperCAmelCase ) > 1:
lowerCAmelCase__ :Any = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowerCAmelCase__ :Optional[int] = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowerCAmelCase__ :Optional[Any] = max_input_flow
lowerCAmelCase__ :int = 0
lowerCAmelCase__ :Optional[Any] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowerCAmelCase__ :Any = max_input_flow
lowerCAmelCase__ :int = size - 1
def snake_case ( self ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = algorithm(self )
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = flow_network
lowerCAmelCase__ :Optional[int] = flow_network.verticesCount
lowerCAmelCase__ :Optional[Any] = flow_network.sourceIndex
lowerCAmelCase__ :Dict = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowerCAmelCase__ :Optional[int] = flow_network.graph
lowerCAmelCase__ :Optional[Any] = False
def snake_case ( self ):
'''simple docstring'''
if not self.executed:
self._algorithm()
lowerCAmelCase__ :List[Any] = True
def snake_case ( self ):
'''simple docstring'''
pass
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
# use this to save your result
lowerCAmelCase__ :Optional[int] = -1
def snake_case ( self ):
'''simple docstring'''
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
lowerCAmelCase__ :int = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowerCAmelCase__ :Union[str, Any] = [0] * self.verticies_count
lowerCAmelCase__ :Optional[int] = [0] * self.verticies_count
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowerCAmelCase__ :List[str] = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowerCAmelCase__ :str = 0
while i < len(__UpperCAmelCase ):
lowerCAmelCase__ :Any = vertices_list[i]
lowerCAmelCase__ :List[Any] = self.heights[vertex_index]
self.process_vertex(__UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__UpperCAmelCase ) )
lowerCAmelCase__ :int = 0
else:
i += 1
lowerCAmelCase__ :Any = sum(self.preflow[self.source_index] )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__UpperCAmelCase , __UpperCAmelCase )
self.relabel(__UpperCAmelCase )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Union[str, Any] = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowerCAmelCase__ :Union[str, Any] = self.heights[to_index]
if min_height is not None:
lowerCAmelCase__ :Optional[Any] = min_height + 1
if __name__ == "__main__":
__A = [0]
__A = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__A = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__A = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__A = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
| 93
|
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ['image_processor', 'tokenizer']
UpperCamelCase__ = 'BlipImageProcessor'
UpperCamelCase__ = 'AutoTokenizer'
def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
super().__init__(snake_case_ , snake_case_ )
# add QFormer tokenizer
lowercase =qformer_tokenizer
def __call__( self , snake_case_ = None , snake_case_ = None , snake_case_ = True , snake_case_ = False , snake_case_ = None , snake_case_ = None , snake_case_ = 0 , snake_case_ = None , snake_case_ = None , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = False , snake_case_ = True , snake_case_ = None , **snake_case_ , ):
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
lowercase =BatchFeature()
if text is not None:
lowercase =self.tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
encoding.update(snake_case_ )
lowercase =self.qformer_tokenizer(
text=snake_case_ , add_special_tokens=snake_case_ , padding=snake_case_ , truncation=snake_case_ , max_length=snake_case_ , stride=snake_case_ , pad_to_multiple_of=snake_case_ , return_attention_mask=snake_case_ , return_overflowing_tokens=snake_case_ , return_special_tokens_mask=snake_case_ , return_offsets_mapping=snake_case_ , return_token_type_ids=snake_case_ , return_length=snake_case_ , verbose=snake_case_ , return_tensors=snake_case_ , **snake_case_ , )
lowercase =qformer_text_encoding.pop('''input_ids''' )
lowercase =qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
lowercase =self.image_processor(snake_case_ , return_tensors=snake_case_ )
encoding.update(snake_case_ )
return encoding
def _A( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.batch_decode(*snake_case_ , **snake_case_ )
def _A( self , *snake_case_ , **snake_case_ ):
return self.tokenizer.decode(*snake_case_ , **snake_case_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _A( self ):
lowercase =self.tokenizer.model_input_names
lowercase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def _A( self , snake_case_ , **snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
lowercase =os.path.join(snake_case_ , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(snake_case_ )
return super().save_pretrained(snake_case_ , **snake_case_ )
@classmethod
def _A( cls , snake_case_ , **snake_case_ ):
lowercase =AutoTokenizer.from_pretrained(snake_case_ , subfolder='''qformer_tokenizer''' )
lowercase =cls._get_arguments_from_pretrained(snake_case_ , **snake_case_ )
args.append(snake_case_ )
return cls(*snake_case_ )
| 72
| 0
|
"""simple docstring"""
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def _snake_case ( lowercase__ : Matrix , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> bool:
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def _snake_case ( lowercase__ : Matrix ) -> tuple[int, int] | None:
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def _snake_case ( lowercase__ : Matrix ) -> Matrix | None:
'''simple docstring'''
if location := find_empty_location(lowercase__ ):
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 1_0 ):
if is_safe(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
lowerCAmelCase_ :str = digit
if sudoku(lowercase__ ) is not None:
return grid
lowerCAmelCase_ :Dict = 0
return None
def _snake_case ( lowercase__ : Matrix ) -> None:
'''simple docstring'''
for row in grid:
for cell in row:
print(lowercase__ , end=""" """ )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__UpperCAmelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 256
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__UpperCAmelCase = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
__UpperCAmelCase = F"""https://www.google.com/search?q={query}&num=100"""
__UpperCAmelCase = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
__UpperCAmelCase = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
__UpperCAmelCase = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 256
| 1
|
def __snake_case ( lowerCAmelCase_ = 1_0_0_0_0_0_0 ) -> int:
SCREAMING_SNAKE_CASE__ = [i - 1 for i in range(limit + 1 )]
for i in range(2 , limit + 1 ):
if phi[i] == i - 1:
for j in range(2 * i , limit + 1 , lowerCAmelCase_ ):
phi[j] -= phi[j] // i
return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
print(solution())
| 100
|
"""simple docstring"""
from __future__ import annotations
def A_ ( snake_case_ : list ,snake_case_ : int ):
'''simple docstring'''
# Checks if the entire collection has been sorted
if len(snake_case_ ) <= 1 or n <= 1:
return
insert_next(snake_case_ ,n - 1 )
rec_insertion_sort(snake_case_ ,n - 1 )
def A_ ( snake_case_ : list ,snake_case_ : int ):
'''simple docstring'''
# Checks order between adjacent elements
if index >= len(snake_case_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
UpperCamelCase , UpperCamelCase : List[Any] = (
collection[index],
collection[index - 1],
)
insert_next(snake_case_ ,index + 1 )
if __name__ == "__main__":
__A : Optional[Any] = input('''Enter integers separated by spaces: ''')
__A : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 499
| 0
|
import math
import sys
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Dict = ''''''
try:
with open(SCREAMING_SNAKE_CASE , '''rb''' ) as binary_file:
UpperCamelCase__ : List[Any] = binary_file.read()
for dat in data:
UpperCamelCase__ : int = F"{dat:08b}"
result += curr_byte
return result
except OSError:
print('''File not accessible''' )
sys.exit()
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = {'''0''': '''0''', '''1''': '''1'''}
UpperCamelCase__ , UpperCamelCase__ : str = '''''', ''''''
UpperCamelCase__ : Optional[Any] = len(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ : Any = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ : Union[str, Any] = last_match_id + '''0'''
if math.loga(SCREAMING_SNAKE_CASE ).is_integer():
UpperCamelCase__ : int = {}
for curr_key in list(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : List[Any] = lexicon.pop(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = new_lex
UpperCamelCase__ : Optional[Any] = last_match_id + '''1'''
index += 1
UpperCamelCase__ : Optional[Any] = ''''''
return result
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : List[str] = 8
try:
with open(SCREAMING_SNAKE_CASE , '''wb''' ) as opened_file:
UpperCamelCase__ : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('''10000000''' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder='''big''' ) )
except OSError:
print('''File not accessible''' )
sys.exit()
def _a ( SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ : Dict = data_bits[counter:]
UpperCamelCase__ : Any = data_bits[counter + 1 :]
return data_bits
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Dict = read_file_binary(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = remove_prefix(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[Any] = decompress_data(SCREAMING_SNAKE_CASE )
write_file_binary(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 106
|
__UpperCamelCase : List[Any] = 256
# Modulus to hash a string
__UpperCamelCase : Union[str, Any] = 100_0003
def _a ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase__ : Optional[int] = len(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : List[Any] = len(SCREAMING_SNAKE_CASE )
if p_len > t_len:
return False
UpperCamelCase__ : Any = 0
UpperCamelCase__ : str = 0
UpperCamelCase__ : List[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ : Any = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
UpperCamelCase__ : List[str] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
UpperCamelCase__ : Dict = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
UpperCamelCase__ : Optional[int] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _a ( ):
"""simple docstring"""
UpperCamelCase__ : Tuple = '''abc1abc12'''
UpperCamelCase__ : Dict = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
UpperCamelCase__ : List[str] = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not rabin_karp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test 2)
UpperCamelCase__ : Optional[int] = '''ABABX'''
UpperCamelCase__ : int = '''ABABZABABYABABX'''
assert rabin_karp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test 3)
UpperCamelCase__ : int = '''AAAB'''
UpperCamelCase__ : str = '''ABAAAAAB'''
assert rabin_karp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test 4)
UpperCamelCase__ : Union[str, Any] = '''abcdabcy'''
UpperCamelCase__ : List[str] = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test 5)
UpperCamelCase__ : Tuple = '''Lü'''
UpperCamelCase__ : Any = '''Lüsai'''
assert rabin_karp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = '''Lue'''
assert not rabin_karp(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 106
| 1
|
import json
import sys
def lowercase__( A , A ):
with open(A , encoding='utf-8' ) as f:
snake_case__ : Dict = json.load(A )
snake_case__ : List[str] = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
for benchmark_name in sorted(A ):
snake_case__ : Any = results[benchmark_name]
snake_case__ : Optional[Any] = benchmark_name.split('/' )[-1]
output_md.append(f'''### Benchmark: {benchmark_file_name}''' )
snake_case__ : Union[str, Any] = '| metric |'
snake_case__ : List[str] = '|--------|'
snake_case__ : Dict = '| new / old (diff) |'
for metric_name in sorted(A ):
snake_case__ : Dict = benchmark_res[metric_name]
snake_case__ : List[Any] = metric_vals['new']
snake_case__ : Any = metric_vals.get('old' , A )
snake_case__ : Optional[Any] = metric_vals.get('diff' , A )
snake_case__ : Optional[Any] = f''' {new_val:f}''' if isinstance(A , (int, float) ) else 'None'
if old_val is not None:
val_str += f''' / {old_val:f}''' if isinstance(A , (int, float) ) else "None"
if dif_val is not None:
val_str += f''' ({dif_val:f})''' if isinstance(A , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append('</details>' )
with open(A , 'w' , encoding='utf-8' ) as f:
f.writelines('\n'.join(A ) )
if __name__ == "__main__":
lowerCamelCase : int = sys.argv[1]
lowerCamelCase : List[Any] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 170
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class snake_case__ ( UpperCamelCase_ ):
@staticmethod
@abstractmethod
def UpperCAmelCase__ ( _lowerCamelCase : ArgumentParser ):
raise NotImplementedError()
@abstractmethod
def UpperCAmelCase__ ( self : Any ):
raise NotImplementedError()
| 170
| 1
|
'''simple docstring'''
lowerCAmelCase__ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCAmelCase__ = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
assert len(str(A__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
__lowercase = year // 100
__lowercase = (5 * (century % 4) + 2) % 7
__lowercase = year % 100
__lowercase = centurian % 12
__lowercase = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
__lowercase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
else DOOMSDAY_LEAP[month - 1]
)
__lowercase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 624
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 624
| 1
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase :
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Any = parent
_SCREAMING_SNAKE_CASE : Any = batch_size
_SCREAMING_SNAKE_CASE : Any = image_size
_SCREAMING_SNAKE_CASE : Dict = num_channels
_SCREAMING_SNAKE_CASE : Any = embeddings_size
_SCREAMING_SNAKE_CASE : Any = hidden_sizes
_SCREAMING_SNAKE_CASE : List[str] = depths
_SCREAMING_SNAKE_CASE : Optional[int] = is_training
_SCREAMING_SNAKE_CASE : Dict = use_labels
_SCREAMING_SNAKE_CASE : List[str] = hidden_act
_SCREAMING_SNAKE_CASE : List[Any] = num_labels
_SCREAMING_SNAKE_CASE : Optional[int] = scope
_SCREAMING_SNAKE_CASE : Optional[int] = len(__lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
_SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_labels )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = TFRegNetModel(config=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = model(__lowerCAmelCase , training=__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
_SCREAMING_SNAKE_CASE : Tuple = TFRegNetForImageClassification(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(__lowerCAmelCase , labels=__lowerCAmelCase , training=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = config_and_inputs
_SCREAMING_SNAKE_CASE : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase ( _a , _a , unittest.TestCase ):
A__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
A__ = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
A__ = False
A__ = False
A__ = False
A__ = False
A__ = False
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = TFRegNetModelTester(self )
_SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
pass
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : str = model_class(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_SCREAMING_SNAKE_CASE : Any = [*signature.parameters.keys()]
_SCREAMING_SNAKE_CASE : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
_SCREAMING_SNAKE_CASE : Optional[int] = model_class(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) , training=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
_SCREAMING_SNAKE_CASE : List[Any] = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_SCREAMING_SNAKE_CASE : List[Any] = layer_type
_SCREAMING_SNAKE_CASE : Optional[int] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_SCREAMING_SNAKE_CASE : List[Any] = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(snake_case__ , snake_case__ , snake_case__ , snake_case__={} ):
_SCREAMING_SNAKE_CASE : List[str] = model(__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Tuple = model(__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase ).to_tuple()
def recursive_check(snake_case__ , snake_case__ ):
if isinstance(__lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCAmelCase , __lowerCAmelCase ):
recursive_check(__lowerCAmelCase , __lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__lowerCAmelCase , __lowerCAmelCase ) ) , msg=(
"Tuple and dict output are not equal. Difference:"
F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'''
) , )
recursive_check(__lowerCAmelCase , __lowerCAmelCase )
for model_class in self.all_model_classes:
_SCREAMING_SNAKE_CASE : Optional[Any] = model_class(__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : str = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Dict = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_SCREAMING_SNAKE_CASE : int = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Any = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {"output_hidden_states": True} )
_SCREAMING_SNAKE_CASE : List[Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {"output_hidden_states": True} )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE : List[str] = TFRegNetModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _lowerCAmelCase ( ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : str = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCamelCase ( unittest.TestCase ):
@cached_property
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_SCREAMING_SNAKE_CASE : Tuple = self.default_image_processor
_SCREAMING_SNAKE_CASE : List[str] = prepare_img()
_SCREAMING_SNAKE_CASE : List[Any] = image_processor(images=__lowerCAmelCase , return_tensors="tf" )
# forward pass
_SCREAMING_SNAKE_CASE : List[str] = model(**__lowerCAmelCase , training=__lowerCAmelCase )
# verify the logits
_SCREAMING_SNAKE_CASE : int = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
_SCREAMING_SNAKE_CASE : int = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 )
| 572
|
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = CodeGenTokenizer
snake_case : Dict = CodeGenTokenizerFast
snake_case : Tuple = True
snake_case : Optional[int] = {"""add_prefix_space""": True}
snake_case : int = False
def _lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
UpperCamelCase__ = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
UpperCamelCase__ = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCamelCase__ = {"""unk_token""": """<unk>"""}
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowerCAmelCase ) )
def _lowerCamelCase ( self , **__lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCamelCase ( self , **__lowerCAmelCase ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = """lower newer"""
return input_text, output_text
def _lowerCamelCase ( self ):
UpperCamelCase__ = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCamelCase__ = """lower newer"""
UpperCamelCase__ = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCamelCase__ = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = """lower newer"""
# Testing tokenization
UpperCamelCase__ = tokenizer.tokenize(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = rust_tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
UpperCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
UpperCamelCase__ = self.get_rust_tokenizer(add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = tokenizer.encode(__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
UpperCamelCase__ = rust_tokenizer.encode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing the unknown token
UpperCamelCase__ = tokens + [rust_tokenizer.unk_token]
UpperCamelCase__ = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def _lowerCamelCase ( self , __lowerCAmelCase=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
# Simple input
UpperCamelCase__ = """This is a simple input"""
UpperCamelCase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
UpperCamelCase__ = ("""This is a simple input""", """This is a pair""")
UpperCamelCase__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" )
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="""max_length""" , )
def _lowerCamelCase ( self ):
UpperCamelCase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
UpperCamelCase__ = """This is a simple input"""
UpperCamelCase__ = ["""This is a simple input looooooooong""", """This is a simple input"""]
UpperCamelCase__ = ("""This is a simple input""", """This is a pair""")
UpperCamelCase__ = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
UpperCamelCase__ = tokenizer.pad_token_id
UpperCamelCase__ = tokenizer(__lowerCAmelCase , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
UpperCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors="""np""" )
UpperCamelCase__ = tokenizer(*__lowerCAmelCase , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
UpperCamelCase__ = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncate=__lowerCAmelCase , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def _lowerCamelCase ( self ):
UpperCamelCase__ = """$$$"""
UpperCamelCase__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__lowerCAmelCase , add_bos_token=__lowerCAmelCase )
UpperCamelCase__ = """This is a simple input"""
UpperCamelCase__ = ["""This is a simple input 1""", """This is a simple input 2"""]
UpperCamelCase__ = tokenizer.bos_token_id
UpperCamelCase__ = tokenizer(__lowerCAmelCase )
UpperCamelCase__ = tokenizer(__lowerCAmelCase )
self.assertEqual(out_s.input_ids[0] , __lowerCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
UpperCamelCase__ = tokenizer.decode(out_s.input_ids )
UpperCamelCase__ = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __lowerCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def _lowerCamelCase ( self ):
UpperCamelCase__ = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
UpperCamelCase__ = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
UpperCamelCase__ = """\nif len_a > len_b: result = a\nelse: result = b"""
UpperCamelCase__ = tokenizer.encode(__lowerCAmelCase )
UpperCamelCase__ = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
UpperCamelCase__ = tokenizer.decode(__lowerCAmelCase , truncate_before_pattern=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self ):
pass
| 619
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class A ( lowerCamelCase_ ):
def __init__( self : Optional[Any] , __UpperCAmelCase : Tuple ) -> int:
"""simple docstring"""
UpperCamelCase_ = data
def __iter__( self : str ) -> int:
"""simple docstring"""
for element in self.data:
yield element
def a_ ( __snake_case=True ) -> str:
'''simple docstring'''
UpperCamelCase_ = Accelerator(even_batches=__snake_case )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def a_ ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> Dict:
'''simple docstring'''
if iterable:
UpperCamelCase_ = DummyIterableDataset(torch.as_tensor(range(__snake_case ) ) )
else:
UpperCamelCase_ = TensorDataset(torch.as_tensor(range(__snake_case ) ) )
UpperCamelCase_ = DataLoader(__snake_case , batch_size=__snake_case )
UpperCamelCase_ = accelerator.prepare(__snake_case )
return dl
def a_ ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase_ = create_dataloader(accelerator=__snake_case , dataset_size=__snake_case , batch_size=__snake_case )
UpperCamelCase_ = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def a_ ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase_ = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
__snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
__snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def a_ ( ) -> List[str]:
'''simple docstring'''
UpperCamelCase_ = create_accelerator(even_batches=__snake_case )
verify_dataloader_batch_sizes(
__snake_case , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
__snake_case , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def a_ ( ) -> Any:
'''simple docstring'''
UpperCamelCase_ = create_accelerator(even_batches=__snake_case )
UpperCamelCase_ = torch.nn.Linear(1 , 1 )
UpperCamelCase_ = accelerator.prepare(__snake_case )
UpperCamelCase_ = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
UpperCamelCase_ = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(__snake_case ):
UpperCamelCase_ = ddp_model(batch[0].float() )
UpperCamelCase_ = output.sum()
loss.backward()
batch_idxs.append(__snake_case )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def a_ ( __snake_case ) -> Tuple:
'''simple docstring'''
with warnings.catch_warnings(record=__snake_case ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , __snake_case )
assert "only supported for multi-GPU" in str(w[-1].message )
def a_ ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = create_accelerator(even_batches=__snake_case )
UpperCamelCase_ = torch.nn.Linear(1 , 1 )
UpperCamelCase_ = accelerator.prepare(__snake_case )
UpperCamelCase_ = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
UpperCamelCase_ = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
UpperCamelCase_ = train_dl.batch_sampler.even_batches
UpperCamelCase_ = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def a_ ( ) -> Dict:
'''simple docstring'''
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = create_accelerator(even_batches=__snake_case )
UpperCamelCase_ = torch.nn.Linear(1 , 1 )
UpperCamelCase_ = accelerator.prepare(__snake_case )
create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
UpperCamelCase_ = create_dataloader(__snake_case , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings('ignore' )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
UpperCamelCase_ = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def a_ ( ) -> str:
'''simple docstring'''
UpperCamelCase_ = create_accelerator()
UpperCamelCase_ = torch.nn.Linear(1 , 1 )
UpperCamelCase_ = accelerator.prepare(__snake_case )
create_dataloader(__snake_case , dataset_size=3 , batch_size=1 , iterable=__snake_case )
with warnings.catch_warnings(record=__snake_case ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=__snake_case ):
pass
assert issubclass(w[-1].category , __snake_case )
assert "only supported for map-style datasets" in str(w[-1].message )
def a_ ( ) -> int:
'''simple docstring'''
UpperCamelCase_ = create_accelerator()
accelerator.print('Test that even_batches variable ensures uniform batches across processes' )
test_default_ensures_even_batch_sizes()
accelerator.print('Run tests with even_batches disabled' )
test_can_disable_even_batches()
accelerator.print('Test joining uneven inputs' )
test_can_join_uneven_inputs()
accelerator.print('Test overriding even_batches when joining uneven inputs' )
test_join_can_override_even_batches()
accelerator.print('Test overriding even_batches for mixed dataloader types' )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print('Test overriding even_batches raises a warning for iterable dataloaders' )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print('Test join with non DDP distributed raises warning' )
UpperCamelCase_ = accelerator.state.distributed_type
UpperCamelCase_ = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(__snake_case )
UpperCamelCase_ = original_state
if __name__ == "__main__":
main()
| 559
|
from sklearn.metrics import mean_squared_error
import datasets
__a : Union[str, Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
__a : Dict = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
__a : Any = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def lowercase__ ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Union[str, Any]="uniform_average" , __UpperCAmelCase : List[Any]=True ) -> int:
"""simple docstring"""
UpperCamelCase_ = mean_squared_error(
__UpperCAmelCase , __UpperCAmelCase , sample_weight=__UpperCAmelCase , multioutput=__UpperCAmelCase , squared=__UpperCAmelCase )
return {"mse": mse}
| 559
| 1
|
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowercase : Optional[int] =16
_lowercase : List[str] =32
def lowerCAmelCase_ ( _lowercase : Accelerator , _lowercase : int = 16) -> Optional[int]:
"""simple docstring"""
a__ : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""")
a__ : Optional[int] = load_dataset("""glue""" , """mrpc""")
def tokenize_function(_lowercase : Optional[int]):
# max_length=None => use the model max length (it's actually the default)
a__ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_lowercase , max_length=_lowercase)
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
a__ : Optional[int] = datasets.map(
_lowercase , batched=_lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
a__ : Any = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(_lowercase : int):
# On TPU it's best to pad everything to the same length or training will be very slow.
a__ : Optional[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
a__ : List[Any] = 16
elif accelerator.mixed_precision != "no":
a__ : List[Any] = 8
else:
a__ : Any = None
return tokenizer.pad(
_lowercase , padding="""longest""" , max_length=_lowercase , pad_to_multiple_of=_lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
a__ : int = DataLoader(
tokenized_datasets["""train"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase , drop_last=_lowercase)
a__ : Any = DataLoader(
tokenized_datasets["""validation"""] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def lowerCAmelCase_ ( _lowercase : Optional[Any] , _lowercase : int) -> List[str]:
"""simple docstring"""
# Initialize accelerator
a__ : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision)
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
a__ : List[Any] = config["""lr"""]
a__ : int = int(config["""num_epochs"""])
a__ : Tuple = int(config["""seed"""])
a__ : Dict = int(config["""batch_size"""])
a__ : Dict = evaluate.load("""glue""" , """mrpc""")
# If the batch size is too big we use gradient accumulation
a__ : Union[str, Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
a__ : List[Any] = batch_size // MAX_GPU_BATCH_SIZE
a__ : str = MAX_GPU_BATCH_SIZE
set_seed(_lowercase)
a__ , a__ : Any = get_dataloaders(_lowercase , _lowercase)
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
a__ : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_lowercase)
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
a__ : Dict = model.to(accelerator.device)
# Instantiate optimizer
a__ : Any = AdamW(params=model.parameters() , lr=_lowercase)
# Instantiate scheduler
a__ : List[str] = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=100 , num_training_steps=(len(_lowercase) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
a__ , a__ , a__ , a__ , a__ : Tuple = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase)
# Now we train the model
for epoch in range(_lowercase):
model.train()
for step, batch in enumerate(_lowercase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
a__ : List[str] = model(**_lowercase)
a__ : List[str] = outputs.loss
a__ : str = loss / gradient_accumulation_steps
accelerator.backward(_lowercase)
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_lowercase):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device)
with torch.no_grad():
a__ : Any = model(**_lowercase)
a__ : Optional[Any] = outputs.logits.argmax(dim=-1)
a__ , a__ : Dict = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
a__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowercase)
def lowerCAmelCase_ ( ) -> Dict:
"""simple docstring"""
a__ : Union[str, Any] = argparse.ArgumentParser(description="""Simple example of training script.""")
parser.add_argument(
"""--mixed_precision""" , type=_lowercase , default=_lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""")
a__ : Optional[int] = parser.parse_args()
a__ : Optional[Any] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(_lowercase , _lowercase)
if __name__ == "__main__":
main()
| 136
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ (A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :Optional[Any] = StableDiffusionXLImgaImgPipeline
__lowerCAmelCase :Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
__lowerCAmelCase :Optional[Any] = PipelineTesterMixin.required_optional_params - {"latents"}
__lowerCAmelCase :Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__lowerCAmelCase :Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__lowerCAmelCase :Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
a__ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__lowercase , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
a__ : List[Any] = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
a__ : Tuple = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
a__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=3_2 , )
a__ : Optional[int] = CLIPTextModel(__lowercase )
a__ : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowercase )
a__ : Union[str, Any] = CLIPTextModelWithProjection(__lowercase )
a__ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__lowercase )
a__ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase=0 ) -> Tuple:
"""simple docstring"""
a__ : Any = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__lowercase ) ).to(__lowercase )
a__ : Union[str, Any] = image / 2 + 0.5
if str(__lowercase ).startswith("""mps""" ):
a__ : Dict = torch.manual_seed(__lowercase )
else:
a__ : List[str] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
a__ : Optional[Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.7_5,
}
return inputs
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
a__ : Any = self.get_dummy_components()
a__ : List[Any] = StableDiffusionXLImgaImgPipeline(**__lowercase )
a__ : List[Any] = sd_pipe.to(__lowercase )
sd_pipe.set_progress_bar_config(disable=__lowercase )
a__ : Dict = self.get_dummy_inputs(__lowercase )
a__ : str = sd_pipe(**__lowercase ).images
a__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
a__ : Union[str, Any] = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure the prompt-embeds path matches the plain-prompt path
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class snake_case__(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase="cpu" , __lowercase=torch.floataa , __lowercase=0 ) -> List[str]:
"""simple docstring"""
a__ : List[Any] = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
a__ : List[Any] = np.random.RandomState(__lowercase ).standard_normal((1, 4, 6_4, 6_4) )
a__ : Dict = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
a__ : List[Any] = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_default_ddim(self):
        # method name reconstructed from the slow-test suite this snippet came from
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 136
| 1
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: stop early once a full pass makes no swaps."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
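    # Illustrative usage sketch (added; the sample list is an assumption, not part of the file):
    print(bubble_sort([5, 1, 4, 2, 8]))  # -> [1, 2, 4, 5, 8]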
| 579
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 579
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_model_doc_toc(model_doc):
    """Cleans the table of content of the model documentation by removing duplicates and sorting models."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
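# Usage sketch (added for illustration; path assumed to match the transformers repo layout):
#   python utils/check_doc_toc.py                      # fail if the model toc is unsorted/duplicated
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place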
| 683
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stand-in so the examples below can reference Image.open without vision deps."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 683
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
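# Note (added for illustration): until an attribute is first accessed, only the names in
# _import_structure are registered on the module, so `import transformers.models.perceiver`
# stays cheap even though torch-backed classes are listed above.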
| 719
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig):
    model_type = "data2vec-audio"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embedding_groups=16,
        conv_pos_kernel_size=19,
        num_conv_pos_embeddings=5,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return math.prod(self.conv_stride)
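# Minimal usage sketch (added; not part of the original file):
#
#   config = Data2VecAudioConfig()   # defaults mirror facebook/data2vec-audio-base
#   config.inputs_to_logits_ratio    # -> 320, the product of conv_stride (5*2*2*2*2*2*2)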
| 340
| 0
|
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(bulk_modulus / density)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
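    # Illustrative call (added; the water-like inputs are assumptions):
    # speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9) -> ~1467.8 m/s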
| 336
|
def solution(n: int = 4_000_000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed ``n``."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]

    return total


if __name__ == "__main__":
    print(f"{solution() = }")
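    # The default limit of 4,000,000 yields 4613732, the classic Project Euler #2 answer.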
| 336
| 1
|
"""simple docstring"""
from __future__ import annotations
def find_max(nums, left, right):
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums) == 0:
        raise ValueError("find_max() arg is an empty sequence")
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError("list index out of range")
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]

    return left_max if left_max >= right_max else right_max


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
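    # Illustrative call (added; the sample list is an assumption):
    # find_max([2, 8, 3, 5], 0, 3) -> 8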
| 702
|
"""simple docstring"""
def factorial(digit):
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number):
    """A Krishnamurthy number equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print("Program to check whether a number is a Krishnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
    print(
        f"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
    )
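# Worked example (added for illustration): 145 is a Krishnamurthy number,
# since 1! + 4! + 5! = 1 + 24 + 120 = 145.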
| 67
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
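# Worked BPE example (added): with the merge table above, "react" tokenizes to
# "re@@ a@@ c@@ t" - only the "r e" merge applies, and the trailing "</w>" marker
# is stripped before the "@@" continuation markers are attached.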
| 52
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
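# Usage sketch (added for illustration): the lazy module defers heavy imports, e.g.
#
#   from transformers import AutoformerConfig        # resolves via _import_structure
#   config = AutoformerConfig(prediction_length=24)  # torch code loads only on model import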
| 386
| 0
|
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__lowerCAmelCase : List[Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__lowerCAmelCase : str = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__lowerCAmelCase : int = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__lowerCAmelCase : List[Any] = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
__lowerCAmelCase : List[str] = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__lowerCAmelCase : Optional[int] = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier):
    """Split a camel-cased name into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
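# Illustrative call (added): camel_case_split("AutoModelForCausalLM")
# -> ["Auto", "Model", "For", "Causal", "LM"]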
def get_frameworks_table():
    """Generates a dataframe containing the supported auto classes for each model type."""
    # Dictionary model names to config.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]
    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the table mapping model classes to (pipeline_tag, auto_class) pairs."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table
def update_metadata(token, commit_sha):
    """Update the metadata for the Transformers repo."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that every supported pipeline task appears in PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
__lowerCAmelCase : Tuple = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
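# Usage (added for illustration, using the flags defined above):
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>   # push refreshed metadata
#   python utils/update_metadata.py --check-only                            # only validate pipeline tags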
| 701
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
__lowerCAmelCase : Any = ""
__lowerCAmelCase : int = ""
__lowerCAmelCase : Union[str, Any] = ""
__lowerCAmelCase : Any = 1 # (0 is vertical, 1 is horizontal)
def lowerCAmelCase ( ):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase = get_dataset(UpperCamelCase__ , UpperCamelCase__ )
print('''Processing...''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = update_image_and_anno(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for index, image in enumerate(UpperCamelCase__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__UpperCAmelCase = random_chars(3_2 )
__UpperCAmelCase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__UpperCAmelCase = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
cva.imwrite(f"""/{file_root}.jpg""" , UpperCamelCase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Success {index+1}/{len(UpperCamelCase__ )} with {file_name}""" )
__UpperCAmelCase = []
for anno in new_annos[index]:
__UpperCAmelCase = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
annos_list.append(UpperCamelCase__ )
with open(f"""/{file_root}.txt""" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir, img_dir):
    """Collect image paths and their YOLO-format box annotations from a label directory."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list, anno_list, flip_type=1):
    """Flip every image and mirror the matching normalized box centers."""
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char=32):
    """Generate a random lowercase-alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
| 654
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
'Pregnancy': 1_6_8_6_2_9,
'Christianity': 7_6_7_5,
'Explain': 1_0_6_4_2_3,
'Fitness': 6_3_4_4_0,
'Saving': 6_3_1_6_3,
'Ask': 2_7_1_7_1,
'Ass': 9_5_9_8_5,
'Joke': 1_6_3_5_0_9,
'Questions': 4_5_6_2_2,
'Thoughts': 4_9_6_0_5,
'Retail': 5_2_3_4_2,
'Feminism': 1_6_4_3_3_8,
'Writing': 1_1_9_9_2,
'Atheism': 1_9_2_2_6_3,
'Netflix': 4_8_6_1_6,
'Computing': 3_9_6_3_9,
'Opinion': 4_3_2_1_3,
'Alone': 4_4_9_6_7,
'Funny': 5_8_9_1_7,
'Gaming': 4_0_3_5_8,
'Human': 4_0_8_8,
'India': 1_3_3_1,
'Joker': 7_7_1_3_8,
'Diet': 3_6_2_0_6,
'Legal': 1_1_8_5_9,
'Norman': 4_9_3_9,
'Tip': 7_2_6_8_9,
'Weight': 5_2_3_4_3,
'Movies': 4_6_2_7_3,
'Running': 2_3_4_2_5,
'Science': 2_0_9_0,
'Horror': 3_7_7_9_3,
'Confession': 6_0_5_7_2,
'Finance': 1_2_2_5_0,
'Politics': 1_6_3_6_0,
'Scary': 1_9_1_9_8_5,
'Support': 1_2_6_5_4,
'Technologies': 3_2_5_1_6,
'Teenage': 6_6_1_6_0,
'Event': 3_2_7_6_9,
'Learned': 6_7_4_6_0,
'Notion': 1_8_2_7_7_0,
'Wikipedia': 3_7_5_8_3,
'Books': 6_6_6_5,
'Extract': 7_6_0_5_0,
'Confessions': 1_0_2_7_0_1,
'Conspiracy': 7_5_9_3_2,
'Links': 6_3_6_7_4,
'Narcissus': 1_5_0_4_2_5,
'Relationship': 5_4_7_6_6,
'Relationships': 1_3_4_7_9_6,
'Reviews': 4_1_6_7_1,
'News': 4_2_5_6,
'Translation': 2_6_8_2_0,
'multilingual': 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
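# Illustrative call (added): get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}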
class CTRLTokenizer(PreTrainedTokenizer):
    """CTRL tokenizer based on byte-pair encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 421
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
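# Note (added): the expected_slice values above are regression anchors captured from a
# reference run; atol=1e-4 leaves headroom for minor kernel-level nondeterminism across devices.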
| 219
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = dct.pop(__UpperCamelCase )
__lowerCAmelCase = val
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__lowerCAmelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:dim, :]
__lowerCAmelCase = in_proj_bias[: dim]
__lowerCAmelCase = in_proj_weight[
dim : dim * 2, :
]
__lowerCAmelCase = in_proj_bias[
dim : dim * 2
]
__lowerCAmelCase = in_proj_weight[
-dim :, :
]
__lowerCAmelCase = in_proj_bias[-dim :]
# fmt: on
def lowercase (_lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
__lowerCAmelCase = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
__lowerCAmelCase = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[:hidden_size, :]
__lowerCAmelCase = in_proj_bias[:hidden_size]
__lowerCAmelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[hidden_size : hidden_size * 2]
__lowerCAmelCase = in_proj_weight[-hidden_size:, :]
__lowerCAmelCase = in_proj_bias[-hidden_size:]
def lowercase ():
__lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__lowerCAmelCase = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_deta_config(__UpperCamelCase )
# load original state dict
if model_name == "deta-swin-large":
__lowerCAmelCase = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" )
elif model_name == "deta-swin-large-o365":
__lowerCAmelCase = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
__lowerCAmelCase = torch.load(__UpperCamelCase , map_location="""cpu""" )["""model"""]
# original state dict
for name, param in state_dict.items():
print(__UpperCamelCase , param.shape )
# rename keys
__lowerCAmelCase = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
read_in_swin_q_k_v(__UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(__UpperCamelCase , __UpperCamelCase )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
__lowerCAmelCase = state_dict.pop(__UpperCamelCase )
__lowerCAmelCase = val
if "input_proj" in key:
__lowerCAmelCase = state_dict.pop(__UpperCamelCase )
__lowerCAmelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
__lowerCAmelCase = state_dict.pop(__UpperCamelCase )
__lowerCAmelCase = val
# finally, create HuggingFace model and load state dict
__lowerCAmelCase = DetaForObjectDetection(__UpperCamelCase )
model.load_state_dict(__UpperCamelCase )
model.eval()
__lowerCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
model.to(__UpperCamelCase )
# load image processor
__lowerCAmelCase = DetaImageProcessor(format="""coco_detection""" )
# verify our conversion on image
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = processor(images=__UpperCamelCase , return_tensors="""pt""" )
__lowerCAmelCase = encoding["""pixel_values"""]
__lowerCAmelCase = model(pixel_values.to(__UpperCamelCase ) )
# verify logits
print("""Logits:""" , outputs.logits[0, :3, :3] )
print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
__lowerCAmelCase = torch.tensor(
[[-7.6_308, -2.8_485, -5.3_737], [-7.2_037, -4.5_505, -4.8_027], [-7.2_943, -4.2_611, -4.6_617]] )
__lowerCAmelCase = torch.tensor([[0.4_987, 0.4_969, 0.9_999], [0.2_549, 0.5_498, 0.4_805], [0.5_498, 0.2_757, 0.0_569]] )
elif model_name == "deta-swin-large-o365":
__lowerCAmelCase = torch.tensor(
[[-8.0_122, -3.5_720, -4.9_717], [-8.1_547, -3.6_886, -4.6_389], [-7.6_610, -3.6_194, -5.0_134]] )
__lowerCAmelCase = torch.tensor([[0.2_523, 0.5_549, 0.4_881], [0.7_715, 0.4_149, 0.4_601], [0.5_503, 0.2_753, 0.0_575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__UpperCamelCase ) , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__UpperCamelCase ) , atol=1E-4 )
print("""Everything ok!""" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
# Push to hub
if push_to_hub:
print("""Pushing model and processor to hub...""" )
model.push_to_hub(f"""jozhang97/{model_name}""" )
processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 721
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def A__ ( self ) -> str:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.dummy_uncond_unet
__lowerCAmelCase = ScoreSdeVeScheduler()
__lowerCAmelCase = ScoreSdeVePipeline(unet=snake_case_ , scheduler=snake_case_ )
sde_ve.to(snake_case_ )
sde_ve.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case_ ).images
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=2 , output_type="""numpy""" , generator=snake_case_ , return_dict=snake_case_ )[
0
]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ) -> List[str]:
__lowerCAmelCase = """google/ncsnpp-church-256"""
__lowerCAmelCase = UNetaDModel.from_pretrained(snake_case_ )
__lowerCAmelCase = ScoreSdeVeScheduler.from_pretrained(snake_case_ )
__lowerCAmelCase = ScoreSdeVePipeline(unet=snake_case_ , scheduler=snake_case_ )
sde_ve.to(snake_case_ )
sde_ve.set_progress_bar_config(disable=snake_case_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = sde_ve(num_inference_steps=10 , output_type="""numpy""" , generator=snake_case_ ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__lowerCAmelCase = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 573
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __a ( __UpperCamelCase ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = 42
@flax_register_to_config
class __a ( nn.Module , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = 32
_lowerCamelCase : Any = 4
_lowerCamelCase : Optional[int] = 4
_lowerCamelCase : str = (
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""CrossAttnDownBlock2D""",
"""DownBlock2D""",
)
_lowerCamelCase : Tuple = ("""UpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""")
_lowerCamelCase : Tuple = False
_lowerCamelCase : Optional[Any] = (3_20, 6_40, 12_80, 12_80)
_lowerCamelCase : Dict = 2
_lowerCamelCase : Union[str, Any] = 8
_lowerCamelCase : List[str] = None
_lowerCamelCase : Optional[int] = 12_80
_lowerCamelCase : Optional[Any] = 0.0
_lowerCamelCase : int = False
_lowerCamelCase : str = jnp.floataa
_lowerCamelCase : List[str] = True
_lowerCamelCase : str = 0
_lowerCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> FrozenDict:
'''simple docstring'''
# init input tensors
__lowercase = (1, self.in_channels, self.sample_size, self.sample_size)
__lowercase = jnp.zeros(_lowerCamelCase , dtype=jnp.floataa )
__lowercase = jnp.ones((1,) , dtype=jnp.intaa )
__lowercase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__lowercase = jax.random.split(_lowerCamelCase )
__lowercase = {'params': params_rng, 'dropout': dropout_rng}
return self.init(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )["params"]
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.block_out_channels
__lowercase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__lowercase = self.num_attention_heads or self.attention_head_dim
# input
__lowercase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__lowercase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__lowercase = FlaxTimestepEmbedding(_lowerCamelCase , dtype=self.dtype )
__lowercase = self.only_cross_attention
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = (num_attention_heads,) * len(self.down_block_types )
# down
__lowercase = []
__lowercase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__lowercase = output_channel
__lowercase = block_out_channels[i]
__lowercase = i == len(_lowerCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__lowercase = FlaxCrossAttnDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__lowercase = FlaxDownBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowerCamelCase )
__lowercase = down_blocks
# mid
__lowercase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__lowercase = []
__lowercase = list(reversed(_lowerCamelCase ) )
__lowercase = list(reversed(_lowerCamelCase ) )
__lowercase = list(reversed(_lowerCamelCase ) )
__lowercase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__lowercase = output_channel
__lowercase = reversed_block_out_channels[i]
__lowercase = reversed_block_out_channels[min(i + 1 , len(_lowerCamelCase ) - 1 )]
__lowercase = i == len(_lowerCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__lowercase = FlaxCrossAttnUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__lowercase = FlaxUpBlockaD(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , prev_output_channel=_lowerCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowerCamelCase )
__lowercase = output_channel
__lowercase = up_blocks
# out
__lowercase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__lowercase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase = True , _lowerCamelCase = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
# 1. time
if not isinstance(_lowerCamelCase , jnp.ndarray ):
__lowercase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__lowercase = timesteps.astype(dtype=jnp.floataa )
__lowercase = jnp.expand_dims(_lowerCamelCase , 0 )
__lowercase = self.time_proj(_lowerCamelCase )
__lowercase = self.time_embedding(_lowerCamelCase )
# 2. pre-process
__lowercase = jnp.transpose(_lowerCamelCase , (0, 2, 3, 1) )
__lowercase = self.conv_in(_lowerCamelCase )
# 3. down
__lowercase = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = down_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
else:
__lowercase = down_block(_lowerCamelCase , _lowerCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__lowercase = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowerCamelCase , _lowerCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__lowercase = new_down_block_res_samples
# 4. mid
__lowercase = self.mid_block(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__lowercase = down_block_res_samples[-(self.layers_per_block + 1) :]
__lowercase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__lowercase = up_block(
_lowerCamelCase , temb=_lowerCamelCase , encoder_hidden_states=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train , )
else:
__lowercase = up_block(_lowerCamelCase , temb=_lowerCamelCase , res_hidden_states_tuple=_lowerCamelCase , deterministic=not train )
# 6. post-process
__lowercase = self.conv_norm_out(_lowerCamelCase )
__lowercase = nn.silu(_lowerCamelCase )
__lowercase = self.conv_out(_lowerCamelCase )
__lowercase = jnp.transpose(_lowerCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowerCamelCase )
| 118
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = ["""pixel_values"""]
def __init__( self : Any , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = 0.9 , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : str , ) -> None:
super().__init__(**UpperCAmelCase )
lowerCamelCase__ : int = size if size is not None else {'shortest_edge': 224}
lowerCamelCase__ : Tuple = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase__ : Any = crop_size if crop_size is not None else {'height': 224, 'width': 224}
lowerCamelCase__ : str = get_size_dict(UpperCAmelCase , param_name='crop_size' )
lowerCamelCase__ : Tuple = do_resize
lowerCamelCase__ : str = size
lowerCamelCase__ : List[str] = crop_pct
lowerCamelCase__ : Any = resample
lowerCamelCase__ : Tuple = do_center_crop
lowerCamelCase__ : Any = crop_size
lowerCamelCase__ : Optional[int] = do_rescale
lowerCamelCase__ : Optional[Any] = rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize
lowerCamelCase__ : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowerCamelCase__ : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A_ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ) -> np.ndarray:
lowerCamelCase__ : Any = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(F"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
lowerCamelCase__ : List[Any] = int(size['shortest_edge'] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowerCamelCase__ : int = int(size['height'] / crop_pct )
else:
lowerCamelCase__ : Any = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct ))
else:
raise ValueError('Invalid size for resize: {}'.format(UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase , size=UpperCAmelCase , default_to_square=UpperCAmelCase )
else:
if "shortest_edge" in size:
lowerCamelCase__ : int = get_resize_output_image_size(UpperCAmelCase , size=size['shortest_edge'] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowerCamelCase__ : List[Any] = (size['height'], size['width'])
else:
raise ValueError('Invalid size for resize: {}'.format(UpperCAmelCase ) )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Optional[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : str , ) -> np.ndarray:
lowerCamelCase__ : Union[str, Any] = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(UpperCAmelCase , size=(size['height'], size['width']) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[Any] , ) -> int:
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Dict , ) -> np.ndarray:
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A_ ( self : str , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Any , ) -> PIL.Image.Image:
lowerCamelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCamelCase__ : Any = crop_pct if crop_pct is not None else self.crop_pct
lowerCamelCase__ : Any = resample if resample is not None else self.resample
lowerCamelCase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase__ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase__ : Any = image_mean if image_mean is not None else self.image_mean
lowerCamelCase__ : str = image_std if image_std is not None else self.image_std
lowerCamelCase__ : Optional[Any] = size if size is not None else self.size
lowerCamelCase__ : Optional[Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCamelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size
lowerCamelCase__ : int = get_size_dict(UpperCAmelCase , param_name='crop_size' )
lowerCamelCase__ : Any = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_pct is None:
raise ValueError('Crop_pct must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
lowerCamelCase__ : Dict = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowerCamelCase__ : Any = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , crop_pct=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCamelCase__ : Optional[Any] = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
lowerCamelCase__ : int = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowerCamelCase__ : List[Any] = [self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowerCamelCase__ : Dict = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowerCamelCase__ : Dict = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 295
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __UpperCamelCase( _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 3_84
if "tiny" in model_name:
UpperCAmelCase__ : List[Any] = [3, 3, 9, 3]
UpperCAmelCase__ : List[Any] = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
UpperCAmelCase__ : Union[str, Any] = [3, 3, 27, 3]
UpperCAmelCase__ : Any = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
UpperCAmelCase__ : List[Any] = [3, 3, 27, 3]
UpperCAmelCase__ : Dict = [1_28, 2_56, 5_12, 10_24]
UpperCAmelCase__ : Optional[int] = 5_12
if "large" in model_name:
UpperCAmelCase__ : str = [3, 3, 27, 3]
UpperCAmelCase__ : Tuple = [1_92, 3_84, 7_68, 15_36]
UpperCAmelCase__ : int = 7_68
if "xlarge" in model_name:
UpperCAmelCase__ : int = [3, 3, 27, 3]
UpperCAmelCase__ : Union[str, Any] = [2_56, 5_12, 10_24, 20_48]
UpperCAmelCase__ : Any = 10_24
# set label information
UpperCAmelCase__ : List[Any] = 1_50
UpperCAmelCase__ : Union[str, Any] = '''huggingface/label-files'''
UpperCAmelCase__ : Optional[Any] = '''ade20k-id2label.json'''
UpperCAmelCase__ : Union[str, Any] = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase__ : List[Any] = {int(_A ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()}
UpperCAmelCase__ : int = ConvNextConfig(
depths=_A , hidden_sizes=_A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
UpperCAmelCase__ : Optional[int] = UperNetConfig(
backbone_config=_A , auxiliary_in_channels=_A , num_labels=_A , idalabel=_A , labelaid=_A , )
return config
def __UpperCamelCase( _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.stages.{i}.{j}.gamma''', F'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.depthwise_conv.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.norm.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((F'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', F'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((F'''backbone.downsample_layers.{i}.0.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.0.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.weight''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((F'''backbone.downsample_layers.{i}.1.bias''', F'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((F'''backbone.norm{i}.weight''', F'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((F'''backbone.norm{i}.bias''', F'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def __UpperCamelCase( _A : str , _A : Any , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = dct.pop(_A )
UpperCAmelCase__ : Dict = val
def __UpperCamelCase( _A : List[str] , _A : Dict , _A : int ):
'''simple docstring'''
UpperCAmelCase__ : Any = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
UpperCAmelCase__ : Optional[int] = model_name_to_url[model_name]
UpperCAmelCase__ : Any = torch.hub.load_state_dict_from_url(_A , map_location='''cpu''' )['''state_dict''']
UpperCAmelCase__ : Union[str, Any] = get_upernet_config(_A )
UpperCAmelCase__ : str = UperNetForSemanticSegmentation(_A )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
UpperCAmelCase__ : Optional[Any] = state_dict.pop(_A )
if "bn" in key:
UpperCAmelCase__ : int = key.replace('''bn''' , '''batch_norm''' )
UpperCAmelCase__ : Union[str, Any] = val
# rename keys
UpperCAmelCase__ : int = create_rename_keys(_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
model.load_state_dict(_A )
# verify on image
UpperCAmelCase__ : str = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
UpperCAmelCase__ : Union[str, Any] = Image.open(requests.get(_A , stream=_A ).raw ).convert('''RGB''' )
UpperCAmelCase__ : Union[str, Any] = SegformerImageProcessor()
UpperCAmelCase__ : Tuple = processor(_A , return_tensors='''pt''' ).pixel_values
with torch.no_grad():
UpperCAmelCase__ : Dict = model(_A )
if model_name == "upernet-convnext-tiny":
UpperCAmelCase__ : Dict = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase__ : Any = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase__ : Dict = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase__ : Tuple = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _A , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(_A )
if push_to_hub:
print(F'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(F'''openmmlab/{model_name}''' )
processor.push_to_hub(F'''openmmlab/{model_name}''' )
if __name__ == "__main__":
UpperCamelCase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
UpperCamelCase__ : List[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 707
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase__ : Tuple = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
UpperCamelCase__ : int = {
'b0': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1_408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1_536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1_792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2_048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2_304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2_560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def __UpperCamelCase( _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = EfficientNetConfig()
UpperCAmelCase__ : int = CONFIG_MAP[model_name]['''hidden_dim''']
UpperCAmelCase__ : Optional[Any] = CONFIG_MAP[model_name]['''width_coef''']
UpperCAmelCase__ : int = CONFIG_MAP[model_name]['''depth_coef''']
UpperCAmelCase__ : Optional[int] = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase__ : int = CONFIG_MAP[model_name]['''dropout_rate''']
UpperCAmelCase__ : Any = CONFIG_MAP[model_name]['''dw_padding''']
UpperCAmelCase__ : Tuple = '''huggingface/label-files'''
UpperCAmelCase__ : Dict = '''imagenet-1k-id2label.json'''
UpperCAmelCase__ : List[Any] = 10_00
UpperCAmelCase__ : Union[str, Any] = json.load(open(hf_hub_download(_A , _A , repo_type='''dataset''' ) , '''r''' ) )
UpperCAmelCase__ : Union[str, Any] = {int(_A ): v for k, v in idalabel.items()}
UpperCAmelCase__ : Dict = idalabel
UpperCAmelCase__ : Tuple = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase( ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase__ : List[str] = Image.open(requests.get(_A , stream=_A ).raw )
return im
def __UpperCamelCase( _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase__ : Tuple = EfficientNetImageProcessor(
size={'''height''': size, '''width''': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_A , )
return preprocessor
def __UpperCamelCase( _A : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [v.split('''_''' )[0].split('''block''' )[1] for v in original_param_names if v.startswith('''block''' )]
UpperCAmelCase__ : List[str] = sorted(set(_A ) )
UpperCAmelCase__ : Optional[Any] = len(_A )
UpperCAmelCase__ : int = {b: str(_A ) for b, i in zip(_A , range(_A ) )}
UpperCAmelCase__ : int = []
rename_keys.append(('''stem_conv/kernel:0''', '''embeddings.convolution.weight''') )
rename_keys.append(('''stem_bn/gamma:0''', '''embeddings.batchnorm.weight''') )
rename_keys.append(('''stem_bn/beta:0''', '''embeddings.batchnorm.bias''') )
rename_keys.append(('''stem_bn/moving_mean:0''', '''embeddings.batchnorm.running_mean''') )
rename_keys.append(('''stem_bn/moving_variance:0''', '''embeddings.batchnorm.running_var''') )
for b in block_names:
UpperCAmelCase__ : Tuple = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(('''top_conv/kernel:0''', '''encoder.top_conv.weight''') )
rename_keys.append(('''top_bn/gamma:0''', '''encoder.top_bn.weight''') )
rename_keys.append(('''top_bn/beta:0''', '''encoder.top_bn.bias''') )
rename_keys.append(('''top_bn/moving_mean:0''', '''encoder.top_bn.running_mean''') )
rename_keys.append(('''top_bn/moving_variance:0''', '''encoder.top_bn.running_var''') )
UpperCAmelCase__ : List[str] = {}
for item in rename_keys:
if item[0] in original_param_names:
UpperCAmelCase__ : str = '''efficientnet.''' + item[1]
UpperCAmelCase__ : List[str] = '''classifier.weight'''
UpperCAmelCase__ : Union[str, Any] = '''classifier.bias'''
return key_mapping
def __UpperCamelCase( _A : Optional[Any] , _A : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
for key, value in tf_params.items():
if "normalization" in key:
continue
UpperCAmelCase__ : Tuple = key_mapping[key]
if "_conv" in key and "kernel" in key:
UpperCAmelCase__ : List[str] = torch.from_numpy(_A ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
UpperCAmelCase__ : Dict = torch.from_numpy(_A ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
UpperCAmelCase__ : Optional[int] = torch.from_numpy(np.transpose(_A ) )
else:
UpperCAmelCase__ : str = torch.from_numpy(_A )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_A )
@torch.no_grad()
def __UpperCamelCase( _A : Tuple , _A : str , _A : Union[str, Any] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : int = model_classes[model_name](
include_top=_A , weights='''imagenet''' , input_tensor=_A , input_shape=_A , pooling=_A , classes=10_00 , classifier_activation='''softmax''' , )
UpperCAmelCase__ : Dict = original_model.trainable_variables
UpperCAmelCase__ : Optional[Any] = original_model.non_trainable_variables
UpperCAmelCase__ : List[str] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
UpperCAmelCase__ : Union[str, Any] = param.numpy()
UpperCAmelCase__ : Optional[Any] = list(tf_params.keys() )
# Load HuggingFace model
UpperCAmelCase__ : Union[str, Any] = get_efficientnet_config(_A )
UpperCAmelCase__ : Any = EfficientNetForImageClassification(_A ).eval()
UpperCAmelCase__ : Tuple = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('''Converting parameters...''' )
UpperCAmelCase__ : Tuple = rename_keys(_A )
replace_params(_A , _A , _A )
# Initialize preprocessor and preprocess input image
UpperCAmelCase__ : List[Any] = convert_image_processor(_A )
UpperCAmelCase__ : List[Any] = preprocessor(images=prepare_img() , return_tensors='''pt''' )
# HF model inference
hf_model.eval()
with torch.no_grad():
UpperCAmelCase__ : Optional[Any] = hf_model(**_A )
UpperCAmelCase__ : Union[str, Any] = outputs.logits.detach().numpy()
# Original model inference
UpperCAmelCase__ : str = False
UpperCAmelCase__ : int = CONFIG_MAP[model_name]['''image_size''']
UpperCAmelCase__ : List[str] = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
UpperCAmelCase__ : List[Any] = image.img_to_array(_A )
UpperCAmelCase__ : List[Any] = np.expand_dims(_A , axis=0 )
UpperCAmelCase__ : List[Any] = original_model.predict(_A )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_A , _A , atol=1e-3 ), "The predicted logits are not the same."
print('''Model outputs match!''' )
if save_model:
# Create folder to save model
if not os.path.isdir(_A ):
os.mkdir(_A )
# Save converted model and image processor
hf_model.save_pretrained(_A )
preprocessor.save_pretrained(_A )
if push_to_hub:
# Push model and image processor to hub
print(F'''Pushing converted {model_name} to the hub...''' )
UpperCAmelCase__ : Any = F'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_A )
hf_model.push_to_hub(_A )
if __name__ == "__main__":
UpperCamelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
UpperCamelCase__ : Union[str, Any] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 496
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = [
['''attention''', '''attn'''],
['''encoder_attention''', '''encoder_attn'''],
['''q_lin''', '''q_proj'''],
['''k_lin''', '''k_proj'''],
['''v_lin''', '''v_proj'''],
['''out_lin''', '''out_proj'''],
['''norm_embeddings''', '''layernorm_embedding'''],
['''position_embeddings''', '''embed_positions'''],
['''embeddings''', '''embed_tokens'''],
['''ffn.lin''', '''fc'''],
]
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
UpperCAmelCase__ : str = k.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if k.startswith('''encoder''' ):
UpperCAmelCase__ : Any = k.replace('''.attn''' , '''.self_attn''' )
UpperCAmelCase__ : Union[str, Any] = k.replace('''norm1''' , '''self_attn_layer_norm''' )
UpperCAmelCase__ : int = k.replace('''norm2''' , '''final_layer_norm''' )
elif k.startswith('''decoder''' ):
UpperCAmelCase__ : Dict = k.replace('''norm1''' , '''self_attn_layer_norm''' )
UpperCAmelCase__ : Dict = k.replace('''norm2''' , '''encoder_attn_layer_norm''' )
UpperCAmelCase__ : str = k.replace('''norm3''' , '''final_layer_norm''' )
return k
def a__ ( lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : Optional[Any] = [
'''model.encoder.layernorm_embedding.weight''',
'''model.encoder.layernorm_embedding.bias''',
'''model.decoder.layernorm_embedding.weight''',
'''model.decoder.layernorm_embedding.bias''',
]
for k in keys:
UpperCAmelCase__ : str = sd.pop(lowerCAmelCase__ )
UpperCAmelCase__ : int = k.replace('''layernorm_embedding''' , '''layer_norm''' )
assert new_k not in sd
UpperCAmelCase__ : Any = v
UpperCamelCase__ = ['''START''']
@torch.no_grad()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : Optional[int] = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
UpperCAmelCase__ : Union[str, Any] = model['''model''']
UpperCAmelCase__ : Dict = BlenderbotConfig.from_json_file(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = BlenderbotForConditionalGeneration(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = m.model.state_dict().keys()
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : Union[str, Any] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
UpperCAmelCase__ : Union[str, Any] = rename_state_dict_key(lowerCAmelCase__ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
UpperCAmelCase__ : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(lowerCAmelCase__ )
m.model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
m.half()
m.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
UpperCamelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
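# A minimal sketch of the substitution-table idea behind the key renaming
# above (the pairs below are a subset of PATTERNS; names are illustrative):
patterns = [("attention", "attn"), ("q_lin", "q_proj"), ("ffn.lin", "fc")]

def rename(key: str) -> str:
    for parlai_name, hf_name in patterns:
        key = key.replace(parlai_name, hf_name)
    return key

print(rename("encoder.layers.0.attention.q_lin.weight"))
# -> encoder.layers.0.attn.q_proj.weight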
| 75
|
'''simple docstring'''
import functools
def a_ ( _UpperCAmelCase : list[int] ,_UpperCAmelCase : list[int] ) -> int:
# Validation
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not all(isinstance(_UpperCAmelCase ,_UpperCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(_UpperCAmelCase ) != 3 or not all(isinstance(_UpperCAmelCase ,_UpperCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(_UpperCAmelCase ) == 0:
return 0
if min(_UpperCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(_UpperCAmelCase ) >= 3_66:
raise ValueError('All days elements should be less than 366' )
__snake_case : str = set(_UpperCAmelCase )
@functools.cache
def dynamic_programming(_UpperCAmelCase : int ) -> int:
if index > 3_65:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) ,costs[1] + dynamic_programming(index + 7 ) ,costs[2] + dynamic_programming(index + 30 ) ,)
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
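# A self-contained sketch of the memoised ticket-cost recursion above with
# concrete numbers: travel days [1, 4, 6, 7, 8, 20] and pass prices [2, 7, 15]
# (1-day / 7-day / 30-day) give a minimum total cost of 11.
import functools

def min_ticket_cost(days: list[int], costs: list[int]) -> int:
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)
        return min(
            costs[0] + dp(index + 1),
            costs[1] + dp(index + 7),
            costs[2] + dp(index + 30),
        )

    return dp(1)

print(min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11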
| 286
| 0
|
def a_ (_lowerCAmelCase : int = 600851475143 )-> int:
try:
snake_case: List[str] = int(_lowerCAmelCase )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
snake_case: int = 1
snake_case: Any = 2
while i * i <= n:
while n % i == 0:
snake_case: str = i
n //= i
i += 1
if n > 1:
snake_case: List[Any] = n
return int(_lowerCAmelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 701
|
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
__lowerCAmelCase : List[Any] = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
__lowerCAmelCase : str = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def a_ (_lowerCAmelCase : Optional[Any] )-> Optional[int]:
snake_case: Dict = (images / 2 + 0.5).clamp(0 , 1 )
snake_case: Optional[int] = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case: int = numpy_to_pil(_lowerCAmelCase )
return images
def a_ (_lowerCAmelCase : Union[str, Any] )-> Dict:
if images.ndim == 3:
snake_case: List[Any] = images[None, ...]
snake_case: str = (images * 255).round().astype("""uint8""" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
snake_case: int = [Image.fromarray(image.squeeze() , mode="""L""" ) for image in images]
else:
snake_case: Dict = [Image.fromarray(_lowerCAmelCase ) for image in images]
return pil_images
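# A minimal sketch of the tensor -> PIL round trip implemented above, assuming
# a torch tensor in [-1, 1] with layout (batch, channels, height, width):
import torch
from PIL import Image

images = torch.rand(2, 3, 8, 8) * 2 - 1            # fake model output in [-1, 1]
images = (images / 2 + 0.5).clamp(0, 1)            # rescale to [0, 1]
arrays = images.cpu().permute(0, 2, 3, 1).numpy()  # to (batch, h, w, channels)
pils = [Image.fromarray((a * 255).round().astype("uint8")) for a in arrays]
print(pils[0].size)  # (8, 8)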
| 164
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = args.pruning_method
lowercase__ = args.threshold
lowercase__ = args.model_name_or_path.rstrip('''/''' )
lowercase__ = args.target_model_path
print(f'Load fine-pruned model from {model_name_or_path}' )
lowercase__ = torch.load(os.path.join(SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) )
lowercase__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowercase__ = tensor
print(f'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
lowercase__ = tensor
print(f'Copied layer {name}' )
elif "bias" in name:
lowercase__ = tensor
print(f'Copied layer {name}' )
else:
if pruning_method == "magnitude":
lowercase__ = MagnitudeBinarizer.apply(inputs=SCREAMING_SNAKE_CASE , threshold=SCREAMING_SNAKE_CASE )
lowercase__ = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowercase__ = name[:-6]
lowercase__ = model[f'{prefix_}mask_scores']
lowercase__ = TopKBinarizer.apply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowercase__ = name[:-6]
lowercase__ = model[f'{prefix_}mask_scores']
lowercase__ = ThresholdBinarizer.apply(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowercase__ = name[:-6]
lowercase__ = model[f'{prefix_}mask_scores']
lowercase__ , lowercase__ = -0.1, 1.1
lowercase__ = torch.sigmoid(SCREAMING_SNAKE_CASE )
lowercase__ = s * (r - l) + l
lowercase__ = s_bar.clamp(min=0.0 , max=1.0 )
lowercase__ = tensor * mask
print(f'Pruned layer {name}' )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
lowercase__ = os.path.join(
os.path.dirname(SCREAMING_SNAKE_CASE ) , f'bertarized_{os.path.basename(SCREAMING_SNAKE_CASE )}' )
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
shutil.copytree(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
print(f'\nCreated folder {target_model_path}' )
torch.save(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
        'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '
        'For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared. '
        'Not needed for `l0`.'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
lowerCAmelCase = parser.parse_args()
main(args)
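# A self-contained sketch of the masking performed by the "magnitude" branch
# above. Note this simplified version keeps weights above an absolute cutoff,
# whereas the real MagnitudeBinarizer keeps a fraction of the largest weights.
import torch

tensor = torch.tensor([[0.9, -0.01], [0.02, -0.7]])
mask = (tensor.abs() > 0.1).float()  # 1 where the weight survives, 0 elsewhere
print(tensor * mask)                 # small-magnitude entries are zeroed out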
| 43
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = mock.Mock()
snake_case_ = 500
snake_case_ = {}
snake_case_ = HTTPError
snake_case_ = {}
# Download this model to make sure it's in the cache.
snake_case_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=a__ ) as mock_head:
snake_case_ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = mock.Mock()
snake_case_ = 500
snake_case_ = {}
snake_case_ = HTTPError
snake_case_ = {}
# Download this model to make sure it's in the cache.
snake_case_ = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=a__ ) as mock_head:
snake_case_ = GPTaTokenizerFast.from_pretrained("gpt2" )
            # This checks that we did call the fake head request
mock_head.assert_called()
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
try:
snake_case_ = tempfile.mktemp()
with open(a__ , "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" , a__ )
snake_case_ = AlbertTokenizer.from_pretrained(a__ )
finally:
os.remove(a__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json" , "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json" , a__ )
snake_case_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class _snake_case ( unittest.TestCase ):
lowerCAmelCase_ : Optional[int] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowerCAmelCase__ ( cls ) -> List[str]:
'''simple docstring'''
snake_case_ = TOKEN
HfFolder.save_token(a__ )
@classmethod
def lowerCAmelCase__ ( cls ) -> Any:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowerCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = BertTokenizer(a__ )
tokenizer.push_to_hub("test-tokenizer" , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(a__ , repo_id="test-tokenizer" , push_to_hub=a__ , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = BertTokenizer(a__ )
tokenizer.push_to_hub("valid_org/test-tokenizer-org" , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
a__ , repo_id="valid_org/test-tokenizer-org" , push_to_hub=a__ , use_auth_token=self._token )
snake_case_ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def lowerCAmelCase__ ( self ) -> int:
'''simple docstring'''
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = CustomTokenizer(a__ )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case_ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case_ = os.path.join(a__ , "vocab.txt" )
with open(a__ , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
snake_case_ = BertTokenizerFast.from_pretrained(a__ )
bert_tokenizer.save_pretrained(a__ )
snake_case_ = CustomTokenizerFast.from_pretrained(a__ )
tokenizer.push_to_hub("test-dynamic-tokenizer" , use_auth_token=self._token )
snake_case_ = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizerFast" )
snake_case_ = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=a__ , trust_remote_code=a__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , "CustomTokenizer" )
class _snake_case ( unittest.TestCase ):
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data , {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowerCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ) , ["[CLS]", " This is a ", "extra_id_100"] )
def lowerCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ) , ["A", "BC"] )
self.assertEqual(trie.split("BCA" ) , ["BC", "A"] )
def lowerCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
snake_case_ = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ) , ["This is something ", "[SPECIAL_TOKEN]"] )
def lowerCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ) , ["AB", "C"] )
def lowerCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ) , ["ABC", "D"] )
def lowerCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ = Trie()
snake_case_ = trie.cut_text("ABC" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(a__ , ["AB", "C"] )
| 400
| 0
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(_SCREAMING_SNAKE_CASE ):
return ext
raise Exception(
f'Unable to determine file format from file extension {path}. '
f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' )
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE ):
lowerCAmelCase_ : int =pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
lowerCAmelCase_ : Optional[Any] =try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format
lowerCAmelCase_ : Any =PipelineDataFormat.from_str(
format=_SCREAMING_SNAKE_CASE , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
class _snake_case ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase_ : Pipeline , UpperCamelCase_ : PipelineDataFormat ):
lowerCAmelCase_ : List[Any] =nlp
lowerCAmelCase_ : Optional[Any] =reader
@staticmethod
def __A ( UpperCamelCase_ : ArgumentParser ):
lowerCAmelCase_ : str =parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' )
run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' )
run_parser.add_argument('''--input''' , type=UpperCamelCase_ , help='''Path to the file to use for inference''' )
run_parser.add_argument('''--output''' , type=UpperCamelCase_ , help='''Path to the file that will be used post to write results.''' )
run_parser.add_argument('''--model''' , type=UpperCamelCase_ , help='''Name or path to the model to instantiate.''' )
run_parser.add_argument('''--config''' , type=UpperCamelCase_ , help='''Name or path to the model\'s config to instantiate.''' )
run_parser.add_argument(
'''--tokenizer''' , type=UpperCamelCase_ , help='''Name of the tokenizer to use. (default: same as the model name)''' )
run_parser.add_argument(
'''--column''' , type=UpperCamelCase_ , help='''Name of the column to use as input. (For multi columns input as QA use column1,columns2)''' , )
run_parser.add_argument(
'''--format''' , type=UpperCamelCase_ , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , )
run_parser.add_argument(
'''--device''' , type=UpperCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' )
run_parser.set_defaults(func=UpperCamelCase_ )
def __A ( self : Optional[int] ):
        lowerCAmelCase_ , lowerCAmelCase_ =self._nlp, []
for entry in self._reader:
lowerCAmelCase_ : List[Any] =nlp(**UpperCamelCase_ ) if self._reader.is_multi_columns else nlp(UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
outputs.append(UpperCamelCase_ )
else:
outputs += output
# Saving data
if self._nlp.binary_output:
lowerCAmelCase_ : Tuple =self._reader.save_binary(UpperCamelCase_ )
logger.warning(F'Current pipeline requires output to be in binary format, saving at {binary_path}' )
else:
self._reader.save(UpperCamelCase_ )
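# Illustrative invocation of the "run" subcommand registered above (the flags
# are exactly those defined in register_subcommand; file names are hypothetical):
#   transformers-cli run --task text-classification --input reviews.csv \
#       --output predictions.csv --format csv --column text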
| 701
|
'''simple docstring'''
import functools
def SCREAMING_SNAKE_CASE__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Validation
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for day in days ):
raise ValueError('''The parameter days should be a list of integers''' )
if len(_SCREAMING_SNAKE_CASE ) != 3 or not all(isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for cost in costs ):
raise ValueError('''The parameter costs should be a list of three integers''' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return 0
if min(_SCREAMING_SNAKE_CASE ) <= 0:
raise ValueError('''All days elements should be greater than 0''' )
if max(_SCREAMING_SNAKE_CASE ) >= 366:
raise ValueError('''All days elements should be less than 366''' )
lowerCAmelCase_ : List[str] =set(_SCREAMING_SNAKE_CASE )
@functools.cache
def dynamic_programming(_SCREAMING_SNAKE_CASE ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 305
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class SCREAMING_SNAKE_CASE ( lowerCAmelCase ):
'''simple docstring'''
UpperCamelCase_ : Tuple = '''donut-swin'''
UpperCamelCase_ : List[str] = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : List[Any] , UpperCAmelCase_ : Optional[int]=224 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : Union[str, Any]=96 , UpperCAmelCase_ : Union[str, Any]=[2, 2, 6, 2] , UpperCAmelCase_ : int=[3, 6, 12, 24] , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : Tuple=4.0 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Tuple=0.0 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[Any]=1E-5 , **UpperCAmelCase_ : Any , ):
super().__init__(**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = image_size
SCREAMING_SNAKE_CASE : Optional[int] = patch_size
SCREAMING_SNAKE_CASE : int = num_channels
SCREAMING_SNAKE_CASE : int = embed_dim
SCREAMING_SNAKE_CASE : Union[str, Any] = depths
SCREAMING_SNAKE_CASE : Any = len(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = num_heads
SCREAMING_SNAKE_CASE : List[str] = window_size
SCREAMING_SNAKE_CASE : Optional[int] = mlp_ratio
SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias
SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Dict = drop_path_rate
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : List[Any] = use_absolute_embeddings
SCREAMING_SNAKE_CASE : str = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
SCREAMING_SNAKE_CASE : int = int(embed_dim * 2 ** (len(UpperCAmelCase_ ) - 1) )
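# A minimal usage sketch, assuming the class above corresponds to
# transformers.DonutSwinConfig:
from transformers import DonutSwinConfig

cfg = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(cfg.hidden_size)  # embed_dim * 2 ** (len(depths) - 1) = 768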
| 62
|
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : List[str] , UpperCAmelCase_ : int = 6 ):
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
self.create_linked_list(UpperCAmelCase_ )
def _A ( self : List[Any] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE : Optional[int] = Node()
SCREAMING_SNAKE_CASE : str = current_node
SCREAMING_SNAKE_CASE : Optional[int] = current_node
SCREAMING_SNAKE_CASE : Optional[Any] = current_node
for _ in range(1 , UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : Tuple = Node()
SCREAMING_SNAKE_CASE : Dict = current_node
SCREAMING_SNAKE_CASE : Optional[Any] = previous_node
SCREAMING_SNAKE_CASE : Optional[Any] = current_node
SCREAMING_SNAKE_CASE : Union[str, Any] = self.front
SCREAMING_SNAKE_CASE : List[str] = previous_node
def _A ( self : Union[str, Any] ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def _A ( self : Optional[int] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def _A ( self : Optional[int] , UpperCAmelCase_ : Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
SCREAMING_SNAKE_CASE : List[str] = self.rear.next
if self.rear:
SCREAMING_SNAKE_CASE : Dict = data
def _A ( self : List[str] ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
SCREAMING_SNAKE_CASE : List[str] = self.front.data
SCREAMING_SNAKE_CASE : Optional[int] = None
return data
SCREAMING_SNAKE_CASE : List[str] = self.front
SCREAMING_SNAKE_CASE : List[str] = old_front.next
SCREAMING_SNAKE_CASE : Optional[int] = old_front.data
SCREAMING_SNAKE_CASE : List[str] = None
return data
def _A ( self : Any ):
if self.is_empty():
raise Exception("Empty Queue" )
def _A ( self : Optional[Any] ):
if self.rear and self.rear.next == self.front:
raise Exception("Full Queue" )
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Any | None = None
SCREAMING_SNAKE_CASE : Node | None = None
SCREAMING_SNAKE_CASE : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
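# The same enqueue/dequeue behaviour sketched with a bounded deque instead of
# the hand-rolled circular linked list above (caveat: deque(maxlen=...) evicts
# the oldest item silently when full rather than raising "Full Queue"):
from collections import deque

queue = deque(maxlen=6)
queue.append(10)
queue.append(20)
print(queue.popleft())  # 10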
| 62
| 1
|
import math
from numpy import inf
from scipy.integrate import quad
def UpperCamelCase__ ( UpperCAmelCase ) -> float:
if num <= 0:
raise ValueError('''math domain error''' )
return quad(__UpperCAmelCase , 0 , __UpperCAmelCase , args=(__UpperCAmelCase) )[0]
def UpperCamelCase__ ( UpperCAmelCase , UpperCAmelCase ) -> float:
return math.pow(__UpperCAmelCase , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
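# A self-contained check of the integral above: Gamma(z) is the integral of
# x**(z - 1) * exp(-x) from 0 to infinity, so Gamma(5) should equal 4! = 24.
import math
from numpy import inf
from scipy.integrate import quad

value, _ = quad(lambda x, z: math.pow(x, z - 1) * math.exp(-x), 0, inf, args=(5,))
print(round(value))  # 24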
| 713
|
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {'vocab_file': 'vocab.txt'}
__lowerCamelCase = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
__lowerCamelCase = {
'facebook/esm2_t6_8M_UR50D': 1_024,
'facebook/esm2_t12_35M_UR50D': 1_024,
}
def UpperCamelCase__ ( UpperCAmelCase ) -> int:
"""simple docstring"""
with open(UpperCAmelCase , '''r''' ) as f:
_a : List[str] = f.read().splitlines()
return [l.strip() for l in lines]
class UpperCamelCase_ ( UpperCamelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ['''input_ids''', '''attention_mask''']
def __init__( self , lowercase , lowercase="<unk>" , lowercase="<cls>" , lowercase="<pad>" , lowercase="<mask>" , lowercase="<eos>" , **lowercase , ) -> Dict:
super().__init__(**lowercase )
_a : Optional[Any] = load_vocab_file(lowercase )
_a : str = dict(enumerate(self.all_tokens ) )
_a : Any = {tok: ind for ind, tok in enumerate(self.all_tokens )}
_a : List[Any] = unk_token
_a : Dict = cls_token
_a : Tuple = pad_token
_a : List[Any] = mask_token
_a : List[str] = eos_token
_a : Union[str, Any] = self.all_tokens
self._create_trie(self.unique_no_split_tokens )
def snake_case__( self , lowercase ) -> str:
return self._id_to_token.get(lowercase , self.unk_token )
def snake_case__( self , lowercase ) -> int:
return self._token_to_id.get(lowercase , self._token_to_id.get(self.unk_token ) )
def snake_case__( self , lowercase , **lowercase ) -> Optional[Any]:
return text.split()
def snake_case__( self , lowercase=False ) -> Dict:
return len(self._id_to_token )
def snake_case__( self ) -> int:
return {token: i for i, token in enumerate(self.all_tokens )}
def snake_case__( self , lowercase ) -> int:
return self._token_to_id.get(lowercase , self._token_to_id.get(self.unk_token ) )
def snake_case__( self , lowercase ) -> str:
return self._id_to_token.get(lowercase , self.unk_token )
def snake_case__( self , lowercase , lowercase = None ) -> List[int]:
_a : List[str] = [self.cls_token_id]
_a : Dict = [self.eos_token_id] # No sep token in ESM vocabulary
if token_ids_a is None:
if self.eos_token_id is None:
return cls + token_ids_a
else:
return cls + token_ids_a + sep
elif self.eos_token_id is None:
raise ValueError('''Cannot tokenize multiple sequences when EOS token is not set!''' )
return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token
def snake_case__( self , lowercase , lowercase = None , lowercase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if token in self.all_special_ids else 0 for token in token_ids_a]
_a : Any = [1] + ([0] * len(lowercase )) + [1]
if token_ids_a is not None:
mask += [0] * len(lowercase ) + [1]
return mask
def snake_case__( self , lowercase , lowercase ) -> Tuple:
_a : List[Any] = os.path.join(lowercase , (filename_prefix + '''-''' if filename_prefix else '''''') + '''vocab.txt''' )
with open(lowercase , '''w''' ) as f:
f.write('''\n'''.join(self.all_tokens ) )
return (vocab_file,)
@property
def snake_case__( self ) -> int:
return self.get_vocab_size(with_added_tokens=lowercase )
def snake_case__( self , lowercase , lowercase = False ) -> int:
return super()._add_tokens(lowercase , special_tokens=lowercase )
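# A minimal usage sketch, assuming the class above mirrors transformers' ESM
# tokenizer (per-residue vocabulary, <cls> prepended and <eos> appended):
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
ids = tok("MKTAYIAKQRQISFVK")["input_ids"]
print(tok.convert_ids_to_tokens(ids)[:3])  # ['<cls>', 'M', 'K']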
| 307
| 0
|
'''simple docstring'''
def snake_case_ ( _lowerCAmelCase : int = 1000 ) -> int:
UpperCAmelCase : List[str] = -1
UpperCAmelCase : int = 0
for a in range(1 , n // 3 ):
        # Solve the two equations a**2 + b**2 = c**2 and a + b + c = N by eliminating c
UpperCAmelCase : Any = (n * n - 2 * a * n) // (2 * n - 2 * a)
UpperCAmelCase : List[str] = n - a - b
if c * c == (a * a + b * b):
UpperCAmelCase : Union[str, Any] = a * b * c
if candidate >= product:
UpperCAmelCase : Any = candidate
return product
if __name__ == "__main__":
print(F"{solution() = }")
| 127
|
'''simple docstring'''
def snake_case_ ( _lowerCAmelCase : int ) -> list:
UpperCAmelCase : Union[str, Any] = int(_lowerCAmelCase )
if n_element < 1:
UpperCAmelCase : int = ValueError('''a should be a positive number''' )
raise my_error
UpperCAmelCase : str = [1]
    UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = (0, 0, 0)
UpperCAmelCase : Any = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
UpperCamelCase__: List[str] = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
UpperCamelCase__: str = hamming(int(n))
print("-----------------------------------------------------")
print(F"The list with nth numbers is: {hamming_numbers}")
print("-----------------------------------------------------")
| 127
| 1
|
import os
def _UpperCAmelCase ( ):
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
__UpperCamelCase =str(file.readlines()[0] )
__UpperCamelCase =names.replace('"' , '' ).split(',' )
names.sort()
__UpperCamelCase =0
__UpperCamelCase =0
for i, name in enumerate(SCREAMING_SNAKE_CASE__ ):
for letter in name:
name_score += ord(SCREAMING_SNAKE_CASE__ ) - 64
total_score += (i + 1) * name_score
__UpperCamelCase =0
return total_score
if __name__ == "__main__":
print(solution())
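# A quick sanity check of the scoring rule above: the alphabetical value of
# "COLIN" is 3 + 15 + 12 + 9 + 14 = 53.
print(sum(ord(letter) - 64 for letter in "COLIN"))  # 53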
| 682
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Whether the checkpoint is a VQA model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
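# Illustrative invocation of the converter above (the script filename is
# hypothetical; the flags are the ones defined by the argparse block):
#   python convert_pix2struct_checkpoint.py --t5x_checkpoint_path /path/to/t5x/ckpt \
#       --pytorch_dump_folder_path ./pix2struct-base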
| 682
| 1
|
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Dict = "laptop" ) -> DataFrame:
SCREAMING_SNAKE_CASE_ : str =f'https://www.amazon.in/laptop/s?k={product}'
SCREAMING_SNAKE_CASE_ : Union[str, Any] ={
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
SCREAMING_SNAKE_CASE_ : List[str] =BeautifulSoup(requests.get(__lowerCAmelCase , headers=__lowerCAmelCase ).text )
# Initialize a Pandas dataframe with the column titles
SCREAMING_SNAKE_CASE_ : Optional[int] =DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
SCREAMING_SNAKE_CASE_ : str =item.ha.text
SCREAMING_SNAKE_CASE_ : Union[str, Any] ='''https://www.amazon.in/''' + item.ha.a['''href''']
SCREAMING_SNAKE_CASE_ : int =item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
SCREAMING_SNAKE_CASE_ : List[Any] ='''Not available'''
try:
SCREAMING_SNAKE_CASE_ : Tuple =(
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
SCREAMING_SNAKE_CASE_ : List[str] =''''''
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 1_0_0 )
except ValueError:
SCREAMING_SNAKE_CASE_ : str =float('''nan''' )
except AttributeError:
pass
SCREAMING_SNAKE_CASE_ : Dict =[
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
SCREAMING_SNAKE_CASE_ : Any =''' '''
SCREAMING_SNAKE_CASE_ : Optional[Any] =''' '''
data_frame.index += 1
return data_frame
if __name__ == "__main__":
_lowercase = """headphones"""
get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
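# A quick check of the discount formula used above:
# discount = (MRP - price) / MRP * 100.
mrp, price = 1000.0, 750.0
print((mrp - price) / mrp * 100)  # 25.0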
| 443
|
from __future__ import annotations
from statistics import mean
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
snake_case__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i]
snake_case__ = []
snake_case__ = 0
snake_case__ = 0
# When processes are not completed,
    # A process whose arrival time has passed and that still has remaining
    # execution time is put into the ready_process.
    # The shortest process in the ready_process, target_process, is executed.
while completed != no_of_processes:
snake_case__ = []
snake_case__ = -1
for i in range(__lowerCAmelCase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
snake_case__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
snake_case__ = i
total_time += burst_time[target_process]
completed += 1
snake_case__ = 0
snake_case__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> list[int]:
snake_case__ = [0] * no_of_processes
for i in range(__lowerCAmelCase ):
snake_case__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
lowerCamelCase__ : Tuple = 4
lowerCamelCase__ : Union[str, Any] = [2, 5, 3, 7]
lowerCamelCase__ : Optional[Any] = [0, 0, 0, 0]
lowerCamelCase__ : Dict = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ : Union[str, Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 33
| 0
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "hf-internal-testing/tiny-random-t5"
UpperCamelCase_ = AutoTokenizer.from_pretrained(UpperCamelCase__ )
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
UpperCamelCase_ = tokenizer("This is me" , return_tensors="pt" )
UpperCamelCase_ = model.to_bettertransformer()
self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
UpperCamelCase_ = model.generate(**UpperCamelCase__ )
UpperCamelCase_ = model.reverse_bettertransformer()
self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCamelCase__ )
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
self.assertFalse(
any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
UpperCamelCase_ = model_reloaded.generate(**UpperCamelCase__ )
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ ) )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "hf-internal-testing/tiny-random-t5"
UpperCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ )
UpperCamelCase_ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(UpperCamelCase__ ):
model.save_pretrained(UpperCamelCase__ )
UpperCamelCase_ = model.reverse_bettertransformer()
model.save_pretrained(UpperCamelCase__ )
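# A minimal sketch of the BetterTransformer round trip exercised by the tests
# above (requires the `optimum` package, as the decorators indicate):
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
model = model.to_bettertransformer()        # swap in fused-attention modules
model = model.reverse_bettertransformer()   # restore the canonical layout
model.save_pretrained("tiny-t5-roundtrip")  # saving is only allowed after reversal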
| 710
|
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def _lowerCAmelCase (_lowerCAmelCase):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name)
UpperCAmelCase : Dict ="""
transformers can only be used from the command line to convert TensorFlow models to PyTorch; in that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class _lowercase (a_ ):
'''simple docstring'''
@staticmethod
def _lowerCamelCase ( snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = parser.add_parser(
"convert" , help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints." , )
train_parser.add_argument("--model_type" , type=snake_case__ , required=snake_case__ , help="Model's type." )
train_parser.add_argument(
"--tf_checkpoint" , type=snake_case__ , required=snake_case__ , help="TensorFlow checkpoint path or folder." )
train_parser.add_argument(
"--pytorch_dump_output" , type=snake_case__ , required=snake_case__ , help="Path to the PyTorch saved model output." )
train_parser.add_argument("--config" , type=snake_case__ , default="" , help="Configuration file path or folder." )
train_parser.add_argument(
"--finetuning_task_name" , type=snake_case__ , default=snake_case__ , help="Optional fine-tuning task name if the TF model was a finetuned model." , )
train_parser.set_defaults(func=snake_case__ )
def __init__( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , *snake_case__ , ):
'''simple docstring'''
UpperCamelCase_ = logging.get_logger("transformers-cli/converting" )
self._logger.info(F"""Loading model {model_type}""" )
UpperCamelCase_ = model_type
UpperCamelCase_ = tf_checkpoint
UpperCamelCase_ = pytorch_dump_output
UpperCamelCase_ = config
UpperCamelCase_ = finetuning_task_name
def _lowerCamelCase ( self ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(snake_case__ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase_ = self._tf_checkpoint
UpperCamelCase_ = ""
else:
UpperCamelCase_ = self._tf_checkpoint
UpperCamelCase_ = ""
convert_transfo_xl_checkpoint_to_pytorch(
snake_case__ , self._config , self._pytorch_dump_output , snake_case__ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(snake_case__ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
"--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]" )
| 504
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
__UpperCAmelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCAmelCase = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
__UpperCAmelCase = {
'''unc-nlp/lxmert-base-uncased''': 512,
}
__UpperCAmelCase = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class lowerCAmelCase_ ( _lowercase ):
UpperCAmelCase__ : Any = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Dict = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Any = LxmertTokenizer
def __init__( self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="[UNK]", SCREAMING_SNAKE_CASE_="[SEP]", SCREAMING_SNAKE_CASE_="[PAD]", SCREAMING_SNAKE_CASE_="[CLS]", SCREAMING_SNAKE_CASE_="[MASK]", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> int:
super().__init__(
__UpperCAmelCase, tokenizer_file=__UpperCAmelCase, do_lower_case=__UpperCAmelCase, unk_token=__UpperCAmelCase, sep_token=__UpperCAmelCase, pad_token=__UpperCAmelCase, cls_token=__UpperCAmelCase, mask_token=__UpperCAmelCase, tokenize_chinese_chars=__UpperCAmelCase, strip_accents=__UpperCAmelCase, **__UpperCAmelCase, )
UpperCamelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase', __UpperCAmelCase ) != do_lower_case
or normalizer_state.get('strip_accents', __UpperCAmelCase ) != strip_accents
or normalizer_state.get('handle_chinese_chars', __UpperCAmelCase ) != tokenize_chinese_chars
):
UpperCamelCase : Dict = getattr(__UpperCAmelCase, normalizer_state.pop('type' ) )
UpperCamelCase : Dict = do_lower_case
UpperCamelCase : List[Any] = strip_accents
UpperCamelCase : List[str] = tokenize_chinese_chars
UpperCamelCase : Dict = normalizer_class(**__UpperCAmelCase )
UpperCamelCase : List[str] = do_lower_case
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> str:
UpperCamelCase : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCamelCase : str = [self.sep_token_id]
UpperCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCamelCase : List[str] = self._tokenizer.model.save(__UpperCAmelCase, name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
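# A minimal usage sketch, assuming the class above mirrors transformers'
# LxmertTokenizerFast:
from transformers import LxmertTokenizerFast

tok = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
enc = tok("What is on the table?", "A question about an image.")
print(enc["token_type_ids"])  # 0s for the first segment, 1s for the second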
| 40
|
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
snake_case_ = logging.get_logger(__name__)
class a__ ( _lowercase ):
__magic_name__ : Tuple = ["pixel_values"]
def __init__(self : Tuple, __UpperCAmelCase : bool = True, __UpperCAmelCase : Dict[str, int] = None, __UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC, __UpperCAmelCase : bool = True, __UpperCAmelCase : Dict[str, int] = None, __UpperCAmelCase : bool = True, __UpperCAmelCase : Union[int, float] = 1 / 255, __UpperCAmelCase : bool = True, __UpperCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN, __UpperCAmelCase : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD, **__UpperCAmelCase : Dict, ) -> None:
"""simple docstring"""
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE : str = size if size is not None else {'''shortest_edge''': 224}
SCREAMING_SNAKE_CASE : List[str] = get_size_dict(__UpperCAmelCase, default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
SCREAMING_SNAKE_CASE : Dict = get_size_dict(__UpperCAmelCase, param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : Optional[int] = do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size
SCREAMING_SNAKE_CASE : Any = resample
SCREAMING_SNAKE_CASE : int = do_center_crop
SCREAMING_SNAKE_CASE : str = crop_size
SCREAMING_SNAKE_CASE : int = do_rescale
SCREAMING_SNAKE_CASE : int = rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE : str = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase__ (self : List[str], __UpperCAmelCase : np.ndarray, __UpperCAmelCase : Dict[str, int], __UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC, __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **__UpperCAmelCase : List[str], ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(__UpperCAmelCase, default_to_square=__UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE : Any = int((256 / 224) * size['''shortest_edge'''] )
SCREAMING_SNAKE_CASE : List[Any] = get_resize_output_image_size(__UpperCAmelCase, size=__UpperCAmelCase, default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : int = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' )
return resize(
__UpperCAmelCase, size=(size_dict['''height'''], size_dict['''width''']), resample=__UpperCAmelCase, data_format=__UpperCAmelCase, **__UpperCAmelCase )
def lowercase__ (self : Union[str, Any], __UpperCAmelCase : np.ndarray, __UpperCAmelCase : Dict[str, int], __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **__UpperCAmelCase : List[str], ) -> np.ndarray:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size dict must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(__UpperCAmelCase, size=(size['''height'''], size['''width''']), data_format=__UpperCAmelCase, **__UpperCAmelCase )
def lowercase__ (self : Any, __UpperCAmelCase : np.ndarray, __UpperCAmelCase : Union[int, float], __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **__UpperCAmelCase : List[str], ) -> np.ndarray:
"""simple docstring"""
return rescale(__UpperCAmelCase, scale=__UpperCAmelCase, data_format=__UpperCAmelCase, **__UpperCAmelCase )
def lowercase__ (self : Optional[int], __UpperCAmelCase : np.ndarray, __UpperCAmelCase : Union[float, List[float]], __UpperCAmelCase : Union[float, List[float]], __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None, **__UpperCAmelCase : Tuple, ) -> np.ndarray:
"""simple docstring"""
return normalize(__UpperCAmelCase, mean=__UpperCAmelCase, std=__UpperCAmelCase, data_format=__UpperCAmelCase, **__UpperCAmelCase )
def lowercase__ (self : List[Any], __UpperCAmelCase : ImageInput, __UpperCAmelCase : Optional[bool] = None, __UpperCAmelCase : Optional[Dict[str, int]] = None, __UpperCAmelCase : PILImageResampling = None, __UpperCAmelCase : Optional[bool] = None, __UpperCAmelCase : Optional[Dict[str, int]] = None, __UpperCAmelCase : Optional[bool] = None, __UpperCAmelCase : Optional[float] = None, __UpperCAmelCase : Optional[bool] = None, __UpperCAmelCase : Optional[Union[float, Iterable[float]]] = None, __UpperCAmelCase : Optional[Union[float, Iterable[float]]] = None, __UpperCAmelCase : Optional[TensorType] = None, __UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST, **__UpperCAmelCase : Any, ) -> BatchFeature:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : str = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[str] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : Any = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Optional[Any] = get_size_dict(__UpperCAmelCase, default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE : Any = get_size_dict(__UpperCAmelCase, param_name='''crop_size''' )
SCREAMING_SNAKE_CASE : Optional[int] = make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Dict = [to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : int = [self.resize(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE : Dict = [self.center_crop(__UpperCAmelCase, __UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : str = [self.rescale(__UpperCAmelCase, __UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : str = [self.normalize(__UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE : Dict = [to_channel_dimension_format(__UpperCAmelCase, __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': images}
return BatchFeature(data=__UpperCAmelCase, tensor_type=__UpperCAmelCase )
| 507
| 0
|
'''simple docstring'''
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = 1 # (0 is vertical, 1 is horizontal)
def _A ( ):
"""simple docstring"""
__lowercase , __lowercase = get_dataset(A__ , A__ )
print('''Processing...''' )
__lowercase , __lowercase , __lowercase = update_image_and_anno(A__ , A__ , A__ )
for index, image in enumerate(A__ ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__lowercase = random_chars(32 )
__lowercase = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__lowercase = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
cva.imwrite(F"/{file_root}.jpg" , A__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Success {index+1}/{len(A__ )} with {file_name}" )
__lowercase = []
for anno in new_annos[index]:
__lowercase = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
annos_list.append(A__ )
with open(F"/{file_root}.txt" , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def _A ( A__ , A__ ):
"""simple docstring"""
__lowercase = []
__lowercase = []
for label_file in glob.glob(os.path.join(A__ , '''*.txt''' ) ):
__lowercase = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(A__ ) as in_file:
__lowercase = in_file.readlines()
__lowercase = os.path.join(A__ , F"{label_name}.jpg" )
__lowercase = []
for obj_list in obj_lists:
__lowercase = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(A__ )
labels.append(A__ )
return img_paths, labels
def _A ( A__ , A__ , A__ = 1 ):
"""simple docstring"""
__lowercase = []
__lowercase = []
__lowercase = []
for idx in range(len(A__ ) ):
__lowercase = []
__lowercase = img_list[idx]
path_list.append(A__ )
__lowercase = anno_list[idx]
__lowercase = cva.imread(A__ )
if flip_type == 1:
__lowercase = cva.flip(A__ , A__ )
for bbox in img_annos:
__lowercase = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
__lowercase = cva.flip(A__ , A__ )
for bbox in img_annos:
__lowercase = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(A__ )
new_imgs_list.append(A__ )
return new_imgs_list, new_annos_lists, path_list
def _A ( A__ = 32 ):
"""simple docstring"""
assert number_char > 1, "The number of character should greater than 1"
__lowercase = ascii_lowercase + digits
return "".join(random.choice(A__ ) for _ in range(A__ ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 624
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowerCAmelCase__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCAmelCase__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCAmelCase__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ (datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) ,reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] ,)
def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : List[Any] ,lowercase__ : List[Any] ,lowercase__ : Union[str, Any]=False ):
__lowercase = spearmanr(lowercase__ ,lowercase__ )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 624
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ ={
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ =['MvpTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ =[
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
__magic_name__ =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 415
|
'''simple docstring'''
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
return "".join(chr(ord(SCREAMING_SNAKE_CASE__ ) - 32 ) if """a""" <= char <= """z""" else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 533
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowerCAmelCase :
lowerCAmelCase__ = LEDConfig
lowerCAmelCase__ = {}
lowerCAmelCase__ = """gelu"""
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=False , a__=99 , a__=32 , a__=2 , a__=4 , a__=37 , a__=0.1 , a__=0.1 , a__=20 , a__=2 , a__=1 , a__=0 , a__=4 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCAmelCase = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCAmelCase = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __A ( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCAmelCase = prepare_led_inputs_dict(a__ , a__ , a__ )
_UpperCAmelCase = tf.concat(
[tf.zeros_like(a__ )[:, :-1], tf.ones_like(a__ )[:, -1:]] , axis=-1 , )
_UpperCAmelCase = global_attention_mask
return config, inputs_dict
def __A ( self , a__ , a__ ):
_UpperCAmelCase = TFLEDModel(config=a__ ).get_decoder()
_UpperCAmelCase = inputs_dict['input_ids']
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict['attention_mask'][:1, :]
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(a__ , attention_mask=a__ , use_cache=a__ )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase = model(a__ , attention_mask=a__ )[0]
_UpperCAmelCase = model(a__ , attention_mask=a__ , past_key_values=a__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(a__ , a__ , rtol=1E-3 )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE=None,SCREAMING_SNAKE_CASE=None,SCREAMING_SNAKE_CASE=None,SCREAMING_SNAKE_CASE=None,) -> Union[str, Any]:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE,config.pad_token_id ),tf.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:],config.pad_token_id ),tf.inta ),
],axis=-1,)
if head_mask is None:
_UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowerCAmelCase ( snake_case , snake_case , unittest.TestCase ):
lowerCAmelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCAmelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase__ = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __A ( self ):
_UpperCAmelCase = TFLEDModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=a__ )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*a__ )
def __A ( self ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = tf.zeros_like(inputs_dict['attention_mask'] )
_UpperCAmelCase = 2
_UpperCAmelCase = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
_UpperCAmelCase = True
_UpperCAmelCase = self.model_tester.seq_length
_UpperCAmelCase = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(a__ ):
_UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(a__ ):
_UpperCAmelCase = [t.numpy() for t in outputs.encoder_attentions]
_UpperCAmelCase = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(a__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = model_class(a__ )
_UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
_UpperCAmelCase = len(a__ )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
if self.is_encoder_decoder:
_UpperCAmelCase = model_class(a__ )
_UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_decoder_attentions_output(a__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(a__ )
_UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(a__ )
_UpperCAmelCase = model(self._prepare_for_class(a__ , a__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(a__ ) )
self.assertEqual(model.config.output_hidden_states , a__ )
check_encoder_attentions_output(a__ )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def __A ( self ):
pass
def __A ( self ):
# TODO: Head-masking not yet implement
pass
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(SCREAMING_SNAKE_CASE,dtype=tf.intaa )
lowerCAmelCase_ = 1E-4
@slow
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
def __A ( self ):
_UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
_UpperCAmelCase = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
_UpperCAmelCase = model(**a__ )[0]
_UpperCAmelCase = (1, 10_24, 7_68)
self.assertEqual(output.shape , a__ )
# change to expected output here
_UpperCAmelCase = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1E-3 )
def __A ( self ):
_UpperCAmelCase = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
_UpperCAmelCase = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
_UpperCAmelCase = prepare_led_inputs_dict(model.config , a__ , a__ )
_UpperCAmelCase = model(**a__ )[0]
_UpperCAmelCase = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , a__ )
# change to expected output here
_UpperCAmelCase = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]] , )
tf.debugging.assert_near(output[:, :3, :3] , a__ , atol=1E-3 , rtol=1E-3 )
| 494
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = '''▁'''
lowerCAmelCase_ = {'''vocab_file''': '''sentencepiece.bpe.model''', '''monolingual_vocab_file''': '''dict.txt'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model''',
},
'''monolingual_vocab_file''': {
'''vinai/bartpho-syllable''': '''https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt''',
},
}
lowerCAmelCase_ = {'''vinai/bartpho-syllable''': 1_024}
class lowerCAmelCase ( snake_case ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self , a__ , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ):
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_UpperCAmelCase = vocab_file
_UpperCAmelCase = monolingual_vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_UpperCAmelCase = {}
_UpperCAmelCase = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(a__ ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = cnt
cnt += 1
with open(a__ , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
_UpperCAmelCase = line.strip().split()[0]
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
if str(a__ ) not in self.fairseq_tokens_to_ids:
_UpperCAmelCase = len(self.fairseq_tokens_to_ids )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a__ ):
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , a__ , a__ = None , a__ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1, 1] + ([0] * len(a__ )) + [1]
def __A ( self , a__ , a__ = None ):
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self ):
return len(self.fairseq_ids_to_tokens )
def __A ( self ):
_UpperCAmelCase = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self , a__ ):
return self.sp_model.encode(a__ , out_type=a__ )
def __A ( self , a__ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __A ( self , a__ ):
return self.fairseq_ids_to_tokens[index]
def __A ( self , a__ ):
_UpperCAmelCase = ''.join(a__ ).replace(a__ , ' ' ).strip()
return out_string
def __A ( self , a__ , a__ = None ):
if not os.path.isdir(a__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
a__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_UpperCAmelCase = os.path.join(
a__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , 'wb' ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(a__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
a__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , a__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(a__ , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(a__ )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 494
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
snake_case_ : List[Any] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Optional[int] = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : int = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Dict = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
snake_case_ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 691
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , _a : Dict , _a : Dict=7 , _a : List[str]=3 , _a : str=18 , _a : Optional[int]=30 , _a : Tuple=400 , _a : Optional[Any]=True , _a : Dict=None , _a : str=True , _a : Tuple=None , _a : Any=True , _a : Any=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] , _a : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] , _a : List[Any]=True , ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =size if size is not None else {'''height''': 224, '''width''': 224}
_SCREAMING_SNAKE_CASE =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =num_channels
_SCREAMING_SNAKE_CASE =image_size
_SCREAMING_SNAKE_CASE =min_resolution
_SCREAMING_SNAKE_CASE =max_resolution
_SCREAMING_SNAKE_CASE =do_resize
_SCREAMING_SNAKE_CASE =size
_SCREAMING_SNAKE_CASE =do_center_crop
_SCREAMING_SNAKE_CASE =crop_size
_SCREAMING_SNAKE_CASE =do_normalize
_SCREAMING_SNAKE_CASE =image_mean
_SCREAMING_SNAKE_CASE =image_std
_SCREAMING_SNAKE_CASE =do_convert_rgb
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __UpperCamelCase ( self : Tuple , _a : Optional[Any]=False , _a : str=False , _a : Dict=False ) -> Dict:
"""simple docstring"""
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_SCREAMING_SNAKE_CASE =[]
for i in range(self.batch_size ):
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_SCREAMING_SNAKE_CASE =[Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
if torchify:
_SCREAMING_SNAKE_CASE =[torch.from_numpy(_a ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : Any ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , do_center_crop=_a )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : List[str] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 224, '''width''': 224} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
_SCREAMING_SNAKE_CASE =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def __UpperCamelCase ( self : str ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
@require_torch
@require_vision
class A__ ( UpperCamelCase__ , unittest.TestCase ):
UpperCAmelCase = ChineseCLIPImageProcessor if is_vision_available() else None
def __UpperCamelCase ( self : int ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=_a )
_SCREAMING_SNAKE_CASE =3
@property
def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCamelCase ( self : int ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_center_crop''' ) )
self.assertTrue(hasattr(_a , '''center_crop''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
self.assertTrue(hasattr(_a , '''do_convert_rgb''' ) )
def __UpperCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
def __UpperCamelCase ( self : Dict ) -> Any:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE =self.image_processor_tester.prepare_inputs(equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_SCREAMING_SNAKE_CASE =image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 691
| 1
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def a__ ( a ) -> Optional[int]:
return 1.0 / (1.0 + np.exp(-_outputs ))
def a__ ( a ) -> Any:
A_ : str = np.max(_outputs , axis=-1 , keepdims=__UpperCAmelCase )
A_ : List[Any] = np.exp(_outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=__UpperCAmelCase )
class __UpperCAmelCase( _snake_case ):
"""simple docstring"""
__magic_name__ = 'sigmoid'
__magic_name__ = 'softmax'
__magic_name__ = 'none'
@add_end_docstrings(
_snake_case , r"""\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `\"sigmoid\"`: Applies the sigmoid function on the output.\n - `\"softmax\"`: Applies the softmax function on the output.\n - `\"none\"`: Does not apply any function on the output.\n """ , )
class __UpperCAmelCase( _snake_case ):
"""simple docstring"""
__magic_name__ = False
__magic_name__ = ClassificationFunction.NONE
def __init__( self , **__magic_name__ ):
"""simple docstring"""
super().__init__(**__magic_name__ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def UpperCAmelCase ( self , __magic_name__=None , __magic_name__=None , __magic_name__="" , **__magic_name__ ):
"""simple docstring"""
A_ : int = tokenizer_kwargs
A_ : Dict = {}
if hasattr(self.model.config , '''return_all_scores''' ) and return_all_scores is None:
A_ : Optional[Any] = self.model.config.return_all_scores
if isinstance(__magic_name__ , __magic_name__ ) or top_k is None:
A_ : str = top_k
A_ : int = False
elif return_all_scores is not None:
warnings.warn(
'''`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of'''
''' `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.''' , __magic_name__ , )
if return_all_scores:
A_ : List[str] = None
else:
A_ : str = 1
if isinstance(__magic_name__ , __magic_name__ ):
A_ : str = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
A_ : Tuple = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self , *__magic_name__ , **__magic_name__ ):
"""simple docstring"""
A_ : Dict = super().__call__(*__magic_name__ , **__magic_name__ )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
A_ : List[str] = '''top_k''' not in kwargs
if isinstance(args[0] , __magic_name__ ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def UpperCAmelCase ( self , __magic_name__ , **__magic_name__ ):
"""simple docstring"""
A_ : Union[str, Any] = self.framework
if isinstance(__magic_name__ , __magic_name__ ):
return self.tokenizer(**__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ) and len(__magic_name__ ) == 1 and isinstance(inputs[0] , __magic_name__ ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] , text_pair=inputs[0][1] , return_tensors=__magic_name__ , **__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
'''The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a'''
''' dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair.''' )
return self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
def UpperCAmelCase ( self , __magic_name__ ):
"""simple docstring"""
return self.model(**__magic_name__ )
def UpperCAmelCase ( self , __magic_name__ , __magic_name__=None , __magic_name__=1 , __magic_name__=True ):
"""simple docstring"""
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
A_ : str = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
A_ : List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config , '''function_to_apply''' ) and function_to_apply is None:
A_ : Tuple = self.model.config.function_to_apply
else:
A_ : Any = ClassificationFunction.NONE
A_ : Tuple = model_outputs['''logits'''][0]
A_ : Tuple = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
A_ : Tuple = sigmoid(__magic_name__ )
elif function_to_apply == ClassificationFunction.SOFTMAX:
A_ : Optional[Any] = softmax(__magic_name__ )
elif function_to_apply == ClassificationFunction.NONE:
A_ : List[Any] = outputs
else:
raise ValueError(f"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
A_ : Union[str, Any] = [
{'''label''': self.model.config.idalabel[i], '''score''': score.item()} for i, score in enumerate(__magic_name__ )
]
if not _legacy:
dict_scores.sort(key=lambda __magic_name__ : x["score"] , reverse=__magic_name__ )
if top_k is not None:
A_ : Union[str, Any] = dict_scores[:top_k]
return dict_scores
| 713
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 236
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCAmelCase_ ( __snake_case , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = FlaxAutoencoderKL
@property
def __a ( self ):
_lowercase : Optional[Any] = 4
_lowercase : List[str] = 3
_lowercase : int = (3_2, 3_2)
_lowercase : List[str] = jax.random.PRNGKey(0 )
_lowercase : Union[str, Any] = jax.random.uniform(_lowerCAmelCase , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def __a ( self ):
_lowercase : Union[str, Any] = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
_lowercase : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
| 66
|
"""simple docstring"""
def a_ ( __a ):
if not all(x.isalpha() for x in string ):
raise ValueError('''String must only contain alphabetic characters.''' )
A__ = sorted(string.lower() )
return len(__a ) == len(set(__a ) )
if __name__ == "__main__":
__snake_case : Any = input('Enter a string ').strip()
__snake_case : Dict = is_isogram(input_str)
print(f'{input_str} is {"an" if isogram else "not an"} isogram.')
| 571
| 0
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowercase_ ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Optional[int] = BloomTokenizerFast
UpperCAmelCase : List[Any] = BloomTokenizerFast
UpperCAmelCase : str = True
UpperCAmelCase : str = False
UpperCAmelCase : str = """tokenizer_file"""
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Assert that the computed token ids match the hard-coded ones and that decoding round-trips."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: truncating to max_length must not raise
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                # Without a pad token, padding to max_length must raise a ValueError
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length"
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length"
                )

    def test_encodings_from_xnli_dataset(self):
        """Tokenization followed by decoding must round-trip on a streamed XNLI sample."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data sample
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The tokenizer class should expose at least one registered pretrained checkpoint.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)

"""simple docstring"""
import os
def lowercase ( )-> Optional[Any]:
'''simple docstring'''
a : Optional[int] = os.path.join(os.path.dirname(A_ ) , "num.txt" )
with open(A_ ) as file_hand:
return str(sum(int(A_ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
import argparse
import logging
import pickle
from collections import Counter


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    # Materialize the counter into a dense list indexed by token id.
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
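
# Sketch of how such counts are typically consumed downstream (an assumption,
# not part of this script): turned into smoothed MLM masking probabilities,
# XLM-style, by exponentiating the frequencies with a smoothing exponent.
#
#   import numpy as np
#   freqs = np.maximum(np.array(counts), 1)
#   token_probs = freqs ** -0.7  # 0.7 is the usual smoothing value, cf. XLM
#   token_probs = token_probs / token_probs.sum()
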
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """Configuration for training the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating the model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval benchmark."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Name of the file to save the human-eval results to."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    vocab_size: Optional[int] = field(
        default=32_768, metadata={"help": "Vocabulary size of the new tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
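
# Illustrative usage sketch (the script context is an assumption, not part of
# this file): these dataclasses are meant to be consumed via HfArgumentParser,
# which turns the field definitions above into CLI flags.
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(TrainingArguments)
#   args = parser.parse_args()
#   print(args.learning_rate)  # 2e-4 unless overridden with --learning_rate
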
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}


class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self):
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
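
# Minimal usage sketch (an illustration, not part of this file): `model`,
# `training_args`, `data_args`, and the datasets are assumed to be prepared
# elsewhere; keyword names follow the constructor above.
#
#   trainer = Seq2SeqTrainer(
#       config=model.config,
#       data_args=data_args,
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#   )
#   trainer.train()
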
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
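
# Illustrative behavior sketch (an assumption about usage, not part of this
# file): the shim still returns a fully functional image processor but emits
# a FutureWarning on construction.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as w:
#       warnings.simplefilter("always")
#       _ = VideoMAEFeatureExtractor()
#       assert issubclass(w[-1].category, FutureWarning)
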
from functools import lru_cache


@lru_cache
def factorial(num: int) -> int:
    """
    Return the factorial of a non-negative integer, memoized with lru_cache.

    >>> factorial(5)
    120
    """
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'

_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'

_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }

from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """
    Doolittle LU decomposition: factor a square matrix into a unit lower
    triangular matrix and an upper triangular matrix, without pivoting.
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Entries of the lower triangle, left of the diagonal
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Entries of the upper triangle, from the diagonal rightwards
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
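
# Illustrative usage (a sketch; matrix values chosen arbitrarily): the two
# factors multiply back to the input.
#
#   matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
#   lower, upper = lower_upper_decomposition(matrix)
#   assert np.allclose(lower @ upper, matrix)
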
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = adjoint_matrix.copy()
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of an XLM model.
    """

    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30_145,
        emb_dim=2_048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2_048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
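
# Illustrative usage (a sketch): `attribute_map` lets the generic config names
# resolve to XLM's own field names.
#
#   config = XLMConfig(emb_dim=256, n_layers=4, n_heads=4)
#   assert config.hidden_size == 256  # resolved to `emb_dim` via attribute_map
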
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic"
            )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"],
        )

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"],
        )

    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "],
        )

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"],
        )

    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)

    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 "),
            ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"],
        )

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"),
            ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"],
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])

        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])

    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])

        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]


@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."
                )
            )

import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def _UpperCamelCase (a__ :Union[str, Any] , a__ :Optional[Any]=1 ):
"""simple docstring"""
if n_shave_prefix_segments >= 0:
return ".".join(path.split(""".""" )[n_shave_prefix_segments:] )
else:
return ".".join(path.split(""".""" )[:n_shave_prefix_segments] )
def _UpperCamelCase (a__ :Dict , a__ :int=0 ):
"""simple docstring"""
UpperCamelCase__ = []
for old_item in old_list:
UpperCamelCase__ = old_item.replace("""in_layers.0""" , """norm1""" )
UpperCamelCase__ = new_item.replace("""in_layers.2""" , """conv1""" )
UpperCamelCase__ = new_item.replace("""out_layers.0""" , """norm2""" )
UpperCamelCase__ = new_item.replace("""out_layers.3""" , """conv2""" )
UpperCamelCase__ = new_item.replace("""emb_layers.1""" , """time_emb_proj""" )
UpperCamelCase__ = new_item.replace("""skip_connection""" , """conv_shortcut""" )
UpperCamelCase__ = shave_segments(a__ , n_shave_prefix_segments=a__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _UpperCamelCase (a__ :str , a__ :Union[str, Any]=0 ):
"""simple docstring"""
UpperCamelCase__ = []
for old_item in old_list:
UpperCamelCase__ = old_item
UpperCamelCase__ = new_item.replace("""norm.weight""" , """group_norm.weight""" )
UpperCamelCase__ = new_item.replace("""norm.bias""" , """group_norm.bias""" )
UpperCamelCase__ = new_item.replace("""proj_out.weight""" , """proj_attn.weight""" )
UpperCamelCase__ = new_item.replace("""proj_out.bias""" , """proj_attn.bias""" )
UpperCamelCase__ = shave_segments(a__ , n_shave_prefix_segments=a__ )
mapping.append({"""old""": old_item, """new""": new_item} )
return mapping
def _UpperCamelCase (a__ :Union[str, Any] , a__ :List[str] , a__ :Optional[Any] , a__ :Union[str, Any]=None , a__ :List[Any]=None , a__ :List[Any]=None ):
"""simple docstring"""
assert isinstance(a__ , a__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
UpperCamelCase__ = old_checkpoint[path]
UpperCamelCase__ = old_tensor.shape[0] // 3
UpperCamelCase__ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
UpperCamelCase__ = old_tensor.shape[0] // config["""num_head_channels"""] // 3
UpperCamelCase__ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = old_tensor.split(channels // num_heads , dim=1 )
UpperCamelCase__ = query.reshape(a__ )
UpperCamelCase__ = key.reshape(a__ )
UpperCamelCase__ = value.reshape(a__ )
for path in paths:
UpperCamelCase__ = path["""new"""]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
UpperCamelCase__ = new_path.replace("""middle_block.0""" , """mid_block.resnets.0""" )
UpperCamelCase__ = new_path.replace("""middle_block.1""" , """mid_block.attentions.0""" )
UpperCamelCase__ = new_path.replace("""middle_block.2""" , """mid_block.resnets.1""" )
if additional_replacements is not None:
for replacement in additional_replacements:
UpperCamelCase__ = new_path.replace(replacement["""old"""] , replacement["""new"""] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
UpperCamelCase__ = old_checkpoint[path["""old"""]][:, :, 0]
else:
UpperCamelCase__ = old_checkpoint[path["""old"""]]
def _UpperCamelCase (a__ :Optional[Any] , a__ :Tuple ):
"""simple docstring"""
UpperCamelCase__ = {}
UpperCamelCase__ = checkpoint["""time_embed.0.weight"""]
UpperCamelCase__ = checkpoint["""time_embed.0.bias"""]
UpperCamelCase__ = checkpoint["""time_embed.2.weight"""]
UpperCamelCase__ = checkpoint["""time_embed.2.bias"""]
UpperCamelCase__ = checkpoint["""input_blocks.0.0.weight"""]
UpperCamelCase__ = checkpoint["""input_blocks.0.0.bias"""]
UpperCamelCase__ = checkpoint["""out.0.weight"""]
UpperCamelCase__ = checkpoint["""out.0.bias"""]
UpperCamelCase__ = checkpoint["""out.2.weight"""]
UpperCamelCase__ = checkpoint["""out.2.bias"""]
# Retrieves the keys for the input blocks only
UpperCamelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """input_blocks""" in layer} )
UpperCamelCase__ = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(a__ )
}
# Retrieves the keys for the middle blocks only
UpperCamelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """middle_block""" in layer} )
UpperCamelCase__ = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(a__ )
}
# Retrieves the keys for the output blocks only
UpperCamelCase__ = len({""".""".join(layer.split(""".""" )[:2] ) for layer in checkpoint if """output_blocks""" in layer} )
UpperCamelCase__ = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(a__ )
}
for i in range(1 , a__ ):
UpperCamelCase__ = (i - 1) // (config["""num_res_blocks"""] + 1)
UpperCamelCase__ = (i - 1) % (config["""num_res_blocks"""] + 1)
UpperCamelCase__ = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
UpperCamelCase__ = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
UpperCamelCase__ = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
UpperCamelCase__ = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
UpperCamelCase__ = renew_resnet_paths(a__ )
UpperCamelCase__ = {"""old""": f"""input_blocks.{i}.0""", """new""": f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
UpperCamelCase__ = {"""old""": """resnets.2.op""", """new""": """downsamplers.0.op"""}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path, resnet_op] , config=a__ )
if len(a__ ):
UpperCamelCase__ = renew_attention_paths(a__ )
UpperCamelCase__ = {
"""old""": f"""input_blocks.{i}.1""",
"""new""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCamelCase__ = {
f"""input_blocks.{i}.1.qkv.bias""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
"""key""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path] , attention_paths_to_split=a__ , config=a__ , )
UpperCamelCase__ = middle_blocks[0]
UpperCamelCase__ = middle_blocks[1]
UpperCamelCase__ = middle_blocks[2]
UpperCamelCase__ = renew_resnet_paths(a__ )
assign_to_checkpoint(a__ , a__ , a__ , config=a__ )
UpperCamelCase__ = renew_resnet_paths(a__ )
assign_to_checkpoint(a__ , a__ , a__ , config=a__ )
UpperCamelCase__ = renew_attention_paths(a__ )
UpperCamelCase__ = {
"""middle_block.1.qkv.bias""": {
"""key""": """mid_block.attentions.0.key.bias""",
"""query""": """mid_block.attentions.0.query.bias""",
"""value""": """mid_block.attentions.0.value.bias""",
},
"""middle_block.1.qkv.weight""": {
"""key""": """mid_block.attentions.0.key.weight""",
"""query""": """mid_block.attentions.0.query.weight""",
"""value""": """mid_block.attentions.0.value.weight""",
},
}
assign_to_checkpoint(
a__ , a__ , a__ , attention_paths_to_split=a__ , config=a__ )
for i in range(a__ ):
UpperCamelCase__ = i // (config["""num_res_blocks"""] + 1)
UpperCamelCase__ = i % (config["""num_res_blocks"""] + 1)
UpperCamelCase__ = [shave_segments(a__ , 2 ) for name in output_blocks[i]]
UpperCamelCase__ = {}
for layer in output_block_layers:
UpperCamelCase__ , UpperCamelCase__ = layer.split(""".""" )[0], shave_segments(a__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(a__ )
else:
UpperCamelCase__ = [layer_name]
if len(a__ ) > 1:
UpperCamelCase__ = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
UpperCamelCase__ = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
UpperCamelCase__ = renew_resnet_paths(a__ )
UpperCamelCase__ = renew_resnet_paths(a__ )
UpperCamelCase__ = {"""old""": f"""output_blocks.{i}.0""", """new""": f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(a__ , a__ , a__ , additional_replacements=[meta_path] , config=a__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
UpperCamelCase__ = list(output_block_list.values() ).index(["""conv.weight""", """conv.bias"""] )
UpperCamelCase__ = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
UpperCamelCase__ = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(a__ ) == 2:
UpperCamelCase__ = []
if len(a__ ):
UpperCamelCase__ = renew_attention_paths(a__ )
UpperCamelCase__ = {
"""old""": f"""output_blocks.{i}.1""",
"""new""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
UpperCamelCase__ = {
f"""output_blocks.{i}.1.qkv.bias""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
"""key""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
"""query""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
"""value""": f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
a__ , a__ , a__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any("""qkv""" in key for key in attentions ) else None , config=a__ , )
else:
UpperCamelCase__ = renew_resnet_paths(a__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
UpperCamelCase__ = """.""".join(["""output_blocks""", str(a__ ), path["""old"""]] )
UpperCamelCase__ = """.""".join(["""up_blocks""", str(a__ ), """resnets""", str(a__ ), path["""new"""]] )
UpperCamelCase__ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the architecture.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
UpperCamelCase__ = json.loads(f.read())
UpperCamelCase__ = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
UpperCamelCase__ = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
UpperCamelCase__ = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase__ = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))
UpperCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
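# A minimal, self-contained sketch (not part of the script above) of the core
# operation the converter performs for attention layers: old LDM checkpoints
# store query/key/value as one fused "qkv" tensor, while diffusers expects
# separate entries. Key names and shapes below are illustrative assumptions,
# not the exact ones assign_to_checkpoint uses internally.
import torch

def split_qkv(checkpoint: dict, old_key: str, new_prefix: str) -> dict:
    # Split a fused qkv weight of shape (3 * dim, dim) into three entries.
    qkv = checkpoint.pop(old_key)
    q, k, v = torch.chunk(qkv, 3, dim=0)
    checkpoint[f"{new_prefix}.query.weight"] = q
    checkpoint[f"{new_prefix}.key.weight"] = k
    checkpoint[f"{new_prefix}.value.weight"] = v
    return checkpoint

# Example with a dummy tensor:
dummy = {"middle_block.1.qkv.weight": torch.randn(3 * 8, 8)}
converted = split_qkv(dummy, "middle_block.1.qkv.weight", "mid_block.attentions.0")
assert converted["mid_block.attentions.0.query.weight"].shape == (8, 8)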
| 548
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """simple docstring"""
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
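# For context, a hedged sketch of the contract the CLI above assumes: each
# command class exposes register_subcommand(), which attaches a sub-parser and
# registers itself as the factory via set_defaults(func=...). Names here are
# illustrative, not the actual diffusers implementation.
from argparse import ArgumentParser

class HelloCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("hello", help="Print a greeting")
        parser.add_argument("--name", default="world")
        parser.set_defaults(func=HelloCommand)

    def __init__(self, args):
        self._name = args.name

    def run(self):
        print(f"hello, {self._name}")

if __name__ == "__main__":
    p = ArgumentParser("demo-cli")
    sub = p.add_subparsers()
    HelloCommand.register_subcommand(sub)
    ns = p.parse_args(["hello", "--name", "diffusers"])
    ns.func(ns).run()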
| 548
| 1
|
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class snake_case_ ( a_ ):
'''simple docstring'''
def __init__( self : List[Any] ) -> Optional[Any]:
lowerCamelCase_ : Any = []
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : int , **__magic_name__ : int ) -> Any:
self.events.append("on_init_end" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , **__magic_name__ : List[str] ) -> Tuple:
self.events.append("on_train_begin" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , **__magic_name__ : Optional[Any] ) -> Dict:
self.events.append("on_train_end" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , **__magic_name__ : List[str] ) -> Dict:
self.events.append("on_epoch_begin" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : str , **__magic_name__ : Union[str, Any] ) -> int:
self.events.append("on_epoch_end" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , **__magic_name__ : Dict ) -> Optional[int]:
self.events.append("on_step_begin" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , **__magic_name__ : Optional[Any] ) -> Tuple:
self.events.append("on_step_end" )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , **__magic_name__ : List[Any] ) -> Union[str, Any]:
self.events.append("on_evaluate" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[str] , **__magic_name__ : List[Any] ) -> List[Any]:
self.events.append("on_predict" )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : int , **__magic_name__ : Dict ) -> Dict:
self.events.append("on_save" )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : Dict , **__magic_name__ : int ) -> Optional[Any]:
self.events.append("on_log" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[int] , __magic_name__ : List[str] , **__magic_name__ : Any ) -> Optional[int]:
self.events.append("on_prediction_step" )
@require_torch
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
lowerCamelCase_ : Optional[int] = tempfile.mkdtemp()
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
shutil.rmtree(self.output_dir )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str=0 , __magic_name__ : List[str]=0 , __magic_name__ : Any=64 , __magic_name__ : Union[str, Any]=64 , __magic_name__ : Tuple=None , __magic_name__ : str=False , **__magic_name__ : Union[str, Any] ) -> List[str]:
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False, since the tests later on depend on its value.
lowerCamelCase_ : Tuple = RegressionDataset(length=_lowercase )
lowerCamelCase_ : List[Any] = RegressionDataset(length=_lowercase )
lowerCamelCase_ : Optional[Any] = RegressionModelConfig(a=_lowercase , b=_lowercase )
lowerCamelCase_ : Tuple = RegressionPreTrainedModel(_lowercase )
lowerCamelCase_ : int = TrainingArguments(self.output_dir , disable_tqdm=_lowercase , report_to=[] , **_lowercase )
return Trainer(
_lowercase , _lowercase , train_dataset=_lowercase , eval_dataset=_lowercase , callbacks=_lowercase , )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] ) -> Any:
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
# Order doesn't matter
lowerCamelCase_ : List[Any] = sorted(_lowercase , key=lambda __magic_name__ : cb.__name__ if isinstance(_lowercase , _lowercase ) else cb.__class__.__name__ )
lowerCamelCase_ : Optional[int] = sorted(_lowercase , key=lambda __magic_name__ : cb.__name__ if isinstance(_lowercase , _lowercase ) else cb.__class__.__name__ )
for cba, cba in zip(_lowercase , _lowercase ):
if isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ):
self.assertEqual(_lowercase , _lowercase )
elif isinstance(_lowercase , _lowercase ) and not isinstance(_lowercase , _lowercase ):
self.assertEqual(_lowercase , cba.__class__ )
elif not isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ):
self.assertEqual(cba.__class__ , _lowercase )
else:
self.assertEqual(_lowercase , _lowercase )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] ) -> Tuple:
lowerCamelCase_ : List[str] = ['''on_init_end''', '''on_train_begin''']
lowerCamelCase_ : Any = 0
lowerCamelCase_ : List[str] = len(trainer.get_eval_dataloader() )
lowerCamelCase_ : Union[str, Any] = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(_lowercase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
lowerCamelCase_ : Optional[Any] = self.get_trainer()
lowerCamelCase_ : Optional[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
# Callbacks passed at init are added to the default callbacks
lowerCamelCase_ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(_lowercase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowerCamelCase_ : Dict = self.get_trainer(disable_tqdm=_lowercase )
lowerCamelCase_ : str = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
lowerCamelCase_ : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowerCamelCase_ : Dict = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(_lowercase )
expected_callbacks.remove(_lowercase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
lowerCamelCase_ : Optional[Any] = self.get_trainer()
lowerCamelCase_ : Any = trainer.pop_callback(_lowercase )
self.assertEqual(cb.__class__ , _lowercase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
trainer.add_callback(_lowercase )
expected_callbacks.insert(0 , _lowercase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
# We can also add, pop, or remove by instance
lowerCamelCase_ : Optional[Any] = self.get_trainer()
lowerCamelCase_ : List[str] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(_lowercase )
expected_callbacks.remove(_lowercase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
lowerCamelCase_ : Dict = self.get_trainer()
lowerCamelCase_ : List[str] = trainer.callback_handler.callbacks[0]
lowerCamelCase_ : Tuple = trainer.pop_callback(_lowercase )
self.assertEqual(_lowercase , _lowercase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
trainer.add_callback(_lowercase )
expected_callbacks.insert(0 , _lowercase )
self.check_callbacks_equality(trainer.callback_handler.callbacks , _lowercase )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=_lowercase )
lowerCamelCase_ : int = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowerCamelCase_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_lowercase , self.get_expected_events(_lowercase ) )
# Independent log/save/eval
lowerCamelCase_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowerCamelCase_ : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_lowercase , self.get_expected_events(_lowercase ) )
lowerCamelCase_ : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowerCamelCase_ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_lowercase , self.get_expected_events(_lowercase ) )
lowerCamelCase_ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
lowerCamelCase_ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_lowercase , self.get_expected_events(_lowercase ) )
lowerCamelCase_ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
lowerCamelCase_ : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_lowercase , self.get_expected_events(_lowercase ) )
# A bit of everything
lowerCamelCase_ : Tuple = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
lowerCamelCase_ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(_lowercase , self.get_expected_events(_lowercase ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
lowerCamelCase_ : int = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(_lowercase ) in warn_mock.call_args[0][0]
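# The events recorded above come from hooks that any TrainerCallback can
# override. A minimal sketch of a practical callback; the loss threshold and
# metric key are made up for illustration:
from transformers import TrainerCallback

class StopOnLossCallback(TrainerCallback):
    def __init__(self, threshold: float = 0.01):
        self.threshold = threshold

    def on_log(self, args, state, control, logs=None, **kwargs):
        # `logs` holds whatever the Trainer just reported (loss, lr, ...).
        if logs is not None and logs.get("loss", float("inf")) < self.threshold:
            control.should_training_stop = True  # stops training cleanly
        return control

# Used like any other callback: Trainer(..., callbacks=[StopOnLossCallback()])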
| 488
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
SCREAMING_SNAKE_CASE__ : List[str] ='\ntransformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
@staticmethod
def a__ ( _lowercase ) -> Dict:
_lowerCamelCase : Any = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=_lowercase , required=_lowercase , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=_lowercase , required=_lowercase , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=_lowercase , required=_lowercase , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=_lowercase , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=_lowercase , default=_lowercase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=_lowercase )
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , *_lowercase , ) -> str:
_lowerCamelCase : Tuple = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F'''Loading model {model_type}''' )
_lowerCamelCase : List[Any] = model_type
_lowerCamelCase : Union[str, Any] = tf_checkpoint
_lowerCamelCase : Tuple = pytorch_dump_output
_lowerCamelCase : Tuple = config
_lowerCamelCase : Optional[Any] = finetuning_task_name
def a__ ( self ) -> str:
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_lowercase )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
if "ckpt" in self._tf_checkpoint.lower():
_lowerCamelCase : Tuple = self._tf_checkpoint
_lowerCamelCase : int = ''''''
else:
_lowerCamelCase : List[str] = self._tf_checkpoint
_lowerCamelCase : str = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
_lowercase , self._config , self._pytorch_dump_output , _lowercase )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_lowercase )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
            raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''' )
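# The command above is normally invoked from the shell, e.g. (paths are
# placeholders, not real files):
#
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin
#
# Programmatically, the same class can be driven directly:
from argparse import Namespace

args = Namespace(
    model_type="bert",
    tf_checkpoint="./bert_model.ckpt",
    config="./bert_config.json",
    pytorch_dump_output="./pytorch_model.bin",
    finetuning_task_name=None,
)
# ConvertCommand(args.model_type, args.tf_checkpoint, args.pytorch_dump_output,
#                args.config, args.finetuning_task_name).run()  # needs real files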
| 434
| 0
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Tuple = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def snake_case_ ( __lowercase , __lowercase , __lowercase , __lowercase ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.''' )
if tokenizer_name is None:
UpperCAmelCase_ : Optional[Any] = TOKENIZER_CLASSES
else:
UpperCAmelCase_ : Any = {tokenizer_name: getattr(__lowercase , tokenizer_name + '''Fast''' )}
logger.info(F'''Loading tokenizer classes: {tokenizer_names}''' )
for tokenizer_name in tokenizer_names:
UpperCAmelCase_ : str = TOKENIZER_CLASSES[tokenizer_name]
UpperCAmelCase_ : str = True
if checkpoint_name is None:
UpperCAmelCase_ : int = list(tokenizer_class.max_model_input_sizes.keys() )
else:
UpperCAmelCase_ : List[str] = [checkpoint_name]
        logger.info(F'''For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}''' )
        for checkpoint in checkpoint_names:
            logger.info(F'''Loading {tokenizer_class.__name__} {checkpoint}''' )
# Load tokenizer
UpperCAmelCase_ : List[str] = tokenizer_class.from_pretrained(__lowercase , force_download=__lowercase )
# Save fast tokenizer
logger.info(F'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''' )
# For organization names we create sub-directories
if "/" in checkpoint:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = checkpoint.split('''/''' )
UpperCAmelCase_ : List[Any] = os.path.join(__lowercase , __lowercase )
elif add_prefix:
UpperCAmelCase_ : Union[str, Any] = checkpoint
UpperCAmelCase_ : Tuple = dump_path
else:
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Union[str, Any] = dump_path
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
UpperCAmelCase_ : Dict = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
UpperCAmelCase_ : Optional[Any] = file_path.split(__lowercase )[-1][0]
if next_char == "/":
UpperCAmelCase_ : Union[str, Any] = os.path.join(__lowercase , __lowercase )
UpperCAmelCase_ : Union[str, Any] = None
logger.info(F'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''' )
UpperCAmelCase_ : Optional[int] = tokenizer.save_pretrained(
__lowercase , legacy_format=__lowercase , filename_prefix=__lowercase )
logger.info(F'''=> File names {file_names}''' )
for file_name in file_names:
if not file_name.endswith('''tokenizer.json''' ):
os.remove(__lowercase )
logger.info(F'''=> removing {file_name}''' )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
__UpperCamelCase : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
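# The script above batch-converts whole checkpoint lists; for a single model
# the same slow-to-fast conversion happens implicitly, since loading a *Fast
# tokenizer class without an existing tokenizer.json runs the converter. The
# model id is just an example:
from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
tok.save_pretrained("./bert-fast")  # writes tokenizer.json alongside the vocab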
| 641
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def snake_case_ ( ):
UpperCAmelCase_ : int = '''https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'''
UpperCAmelCase_ : Dict = Image.open(requests.get(__lowercase , stream=__lowercase ).raw ).convert('''RGB''' )
return image
def snake_case_ ( __lowercase ):
UpperCAmelCase_ : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.layernorm.bias''') )
# fmt: on
return rename_keys
def snake_case_ ( __lowercase , __lowercase , __lowercase ):
UpperCAmelCase_ : Any = dct.pop(__lowercase )
UpperCAmelCase_ : Optional[Any] = val
def snake_case_ ( __lowercase , __lowercase ):
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : Optional[int] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : int = torch.cat((q_bias, torch.zeros_like(__lowercase , requires_grad=__lowercase ), v_bias) )
UpperCAmelCase_ : List[str] = qkv_bias
def snake_case_ ( __lowercase , __lowercase ):
UpperCAmelCase_ : Optional[Any] = 3_6_4 if '''coco''' in model_name else 2_2_4
UpperCAmelCase_ : Any = BlipaVisionConfig(image_size=__lowercase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Any = OPTConfig.from_pretrained('''facebook/opt-2.7b''' , eos_token_id=__lowercase ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('''facebook/opt-6.7b''' , eos_token_id=__lowercase ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : List[str] = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : Any = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : List[Any] = BlipaConfig(vision_config=__lowercase , text_config=__lowercase )
return config, image_size
@torch.no_grad()
def snake_case_ ( __lowercase , __lowercase=None , __lowercase=False ):
UpperCAmelCase_ : List[Any] = (
AutoTokenizer.from_pretrained('''facebook/opt-2.7b''' )
if '''opt''' in model_name
else AutoTokenizer.from_pretrained('''google/flan-t5-xl''' )
)
UpperCAmelCase_ : List[str] = tokenizer('''\n''' , add_special_tokens=__lowercase ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : str = get_blipa_config(__lowercase , eos_token_id=__lowercase )
UpperCAmelCase_ : List[Any] = BlipaForConditionalGeneration(__lowercase ).eval()
UpperCAmelCase_ : Tuple = {
'''blip2-opt-2.7b''': ('''blip2_opt''', '''pretrain_opt2.7b'''),
'''blip2-opt-6.7b''': ('''blip2_opt''', '''pretrain_opt6.7b'''),
'''blip2-opt-2.7b-coco''': ('''blip2_opt''', '''caption_coco_opt2.7b'''),
'''blip2-opt-6.7b-coco''': ('''blip2_opt''', '''caption_coco_opt6.7b'''),
'''blip2-flan-t5-xl''': ('''blip2_t5''', '''pretrain_flant5xl'''),
'''blip2-flan-t5-xl-coco''': ('''blip2_t5''', '''caption_coco_flant5xl'''),
'''blip2-flan-t5-xxl''': ('''blip2_t5''', '''pretrain_flant5xxl'''),
}
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
UpperCAmelCase_ : int = '''cuda''' if torch.cuda.is_available() else '''cpu'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = load_model_and_preprocess(
name=__lowercase , model_type=__lowercase , is_eval=__lowercase , device=__lowercase )
original_model.eval()
print('''Done!''' )
# update state dict keys
UpperCAmelCase_ : Optional[Any] = original_model.state_dict()
UpperCAmelCase_ : List[Any] = create_rename_keys(__lowercase )
for src, dest in rename_keys:
rename_key(__lowercase , __lowercase , __lowercase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : Union[str, Any] = state_dict.pop(__lowercase )
if key.startswith('''Qformer.bert''' ):
UpperCAmelCase_ : Tuple = key.replace('''Qformer.bert''' , '''qformer''' )
if "attention.self" in key:
UpperCAmelCase_ : Optional[Any] = key.replace('''self''' , '''attention''' )
if "opt_proj" in key:
UpperCAmelCase_ : Any = key.replace('''opt_proj''' , '''language_projection''' )
if "t5_proj" in key:
UpperCAmelCase_ : Tuple = key.replace('''t5_proj''' , '''language_projection''' )
if key.startswith('''opt''' ):
UpperCAmelCase_ : Any = key.replace('''opt''' , '''language''' )
if key.startswith('''t5''' ):
UpperCAmelCase_ : Optional[Any] = key.replace('''t5''' , '''language''' )
UpperCAmelCase_ : List[str] = val
# read in qv biases
read_in_q_v_bias(__lowercase , __lowercase )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = hf_model.load_state_dict(__lowercase , strict=__lowercase )
assert len(__lowercase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : str = load_demo_image()
UpperCAmelCase_ : Any = vis_processors['''eval'''](__lowercase ).unsqueeze(0 ).to(__lowercase )
UpperCAmelCase_ : Optional[Any] = tokenizer(['''\n'''] , return_tensors='''pt''' ).input_ids.to(__lowercase )
# create processor
UpperCAmelCase_ : Optional[int] = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} , image_mean=__lowercase , image_std=__lowercase )
UpperCAmelCase_ : Tuple = BlipaProcessor(image_processor=__lowercase , tokenizer=__lowercase )
UpperCAmelCase_ : str = processor(images=__lowercase , return_tensors='''pt''' ).pixel_values.to(__lowercase )
# make sure processor creates exact same pixel values
assert torch.allclose(__lowercase , __lowercase )
original_model.to(__lowercase )
hf_model.to(__lowercase )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'''image''': original_pixel_values, '''text_input''': ['''''']} ).logits
UpperCAmelCase_ : Optional[int] = hf_model(__lowercase , __lowercase ).logits
else:
UpperCAmelCase_ : int = original_model(
{'''image''': original_pixel_values, '''text_input''': ['''\n'''], '''text_output''': ['''\n''']} ).logits
UpperCAmelCase_ : Optional[int] = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_0_0 )
UpperCAmelCase_ : int = hf_model(__lowercase , __lowercase , labels=__lowercase ).logits
assert original_logits.shape == logits.shape
print('''First values of original logits:''' , original_logits[0, :3, :3] )
print('''First values of HF logits:''' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-4_1.5_8_5_0, -4.4_4_4_0, -8.9_9_2_2], [-4_7.4_3_2_2, -5.9_1_4_3, -1.7_3_4_0]] , device=__lowercase )
assert torch.allclose(logits[0, :3, :3] , __lowercase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Tuple = torch.tensor(
[[-5_7.0_1_0_9, -9.8_9_6_7, -1_2.6_2_8_0], [-6_8.6_5_7_8, -1_2.7_1_9_1, -1_0.5_0_6_5]] , device=__lowercase )
else:
# cast to same type
UpperCAmelCase_ : Optional[int] = logits.dtype
assert torch.allclose(original_logits.to(__lowercase ) , __lowercase , atol=1e-2 )
print('''Looks ok!''' )
print('''Generating a caption...''' )
UpperCAmelCase_ : Union[str, Any] = ''''''
UpperCAmelCase_ : Optional[Any] = tokenizer(__lowercase , return_tensors='''pt''' ).input_ids.to(__lowercase )
UpperCAmelCase_ : int = original_model.generate({'''image''': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
__lowercase , __lowercase , do_sample=__lowercase , num_beams=5 , max_length=3_0 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('''Original generation:''' , __lowercase )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__lowercase )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' , __lowercase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__lowercase )
hf_model.save_pretrained(__lowercase )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
__UpperCamelCase : Optional[Any] = [
'blip2-opt-2.7b',
'blip2-opt-6.7b',
'blip2-opt-2.7b-coco',
'blip2-opt-6.7b-coco',
'blip2-flan-t5-xl',
'blip2-flan-t5-xl-coco',
'blip2-flan-t5-xxl',
]
parser.add_argument(
'--model_name',
default='blip2-opt-2.7b',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
__UpperCamelCase : int = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
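# Once converted, the weights are used through the standard BLIP-2 classes.
# A usage sketch with the published repo id (assumed to match the converted
# checkpoint; adjust if you pushed elsewhere):
import requests
import torch
from PIL import Image
from transformers import Blip2ForConditionalGeneration, Blip2Processor

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b")

url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    out = model.generate(**inputs, max_new_tokens=20)
print(processor.batch_decode(out, skip_special_tokens=True))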
| 641
| 1
|
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = """\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"""
_DESCRIPTION = """\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metric is a wrapper around the Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"""
_KWARGS_DESCRIPTION = """\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of references for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLsum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )

    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
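# A self-contained sketch of what _compute() does per example, calling the
# underlying rouge_score package directly (no datasets download needed):
from rouge_score import rouge_scorer

scorer = rouge_scorer.RougeScorer(rouge_types=["rouge1", "rougeL"], use_stemmer=True)
print(scorer.score("hello there", "hello there"))
# -> {'rouge1': Score(precision=1.0, recall=1.0, fmeasure=1.0), 'rougeL': Score(...)}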
| 364
|
"""simple docstring"""
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text_a = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_b = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_a) and not rabin_karp(pattern, text_b)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
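# Worked example of the rolling-hash update used above, on a tiny modulus so
# the arithmetic is easy to follow (base/modulus here are illustrative, not
# the module constants):
base, mod = 256, 101
h = (ord("a") * base + ord("b")) % mod            # hash of "ab"
power = base % mod                                # base**(p_len - 1) % mod
# slide one character to the right: drop 'a', append 'c'
h = ((h - ord("a") * power) * base + ord("c")) % mod
assert h == (ord("b") * base + ord("c")) % mod    # equals hash of "bc"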
| 308
| 0
|
'''simple docstring'''
from math import factorial
class Dual:
    """simple docstring"""

    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """simple docstring"""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        """simple docstring"""
        return y**2 * y**4

    print(differentiate(f, 9, 2))
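# Sanity checks for differentiate(): with a dual number seeded at the position,
# the k-th derivative is the k-th dual coefficient times k!.
assert differentiate(lambda x: x**2, 3, 1) == 6   # d/dx x**2 at x = 3
assert differentiate(lambda x: x**3, 2, 2) == 12  # d2/dx2 x**3 at x = 2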
| 394
|
'''simple docstring'''
def gray_code_sequence(bit_count):
    """simple docstring"""
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count):
    """simple docstring"""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
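# Quick check of the reflect-and-prefix construction above: consecutive Gray
# codes differ in exactly one bit.
codes = gray_code_sequence(3)  # [0, 1, 3, 2, 6, 7, 5, 4]
for a, b in zip(codes, codes[1:]):
    assert bin(a ^ b).count("1") == 1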
| 394
| 1
|
def is_balanced(s) -> bool:
    """simple docstring"""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    """simple docstring"""
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
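# A few sanity checks for is_balanced(): mixed bracket types must close in
# LIFO order.
def test_is_balanced() -> None:
    assert is_balanced("{[()]}")
    assert not is_balanced("{[(])}")  # wrong closing order
    assert not is_balanced("(((")     # unclosed openers

test_is_balanced()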
| 60
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = TransfoXLTokenizer
snake_case__ : Union[str, Any] = False
snake_case__ : Union[str, Any] = False
def A__ ( self ):
"""simple docstring"""
super().setUp()
lowercase = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """<unk> UNwanted , running"""
lowercase = """<unk> unwanted, running"""
return input_text, output_text
def A__ ( self ):
"""simple docstring"""
lowercase = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=__lowerCAmelCase )
lowercase = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(__lowerCAmelCase , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [0, 4, 8, 7] )
def A__ ( self ):
"""simple docstring"""
lowercase = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def A__ ( self ):
"""simple docstring"""
lowercase = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def A__ ( self ):
"""simple docstring"""
lowercase = TransfoXLTokenizer(lower_case=__lowerCAmelCase )
lowercase = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
lowercase = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(tokenizer.convert_tokens_to_string(__lowerCAmelCase ) , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
lowercase = len(__lowerCAmelCase )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(__lowerCAmelCase ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
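# For reference, a sketch (kept as comments, not executed) of the
# move_added_token() flow the last test exercises; the vocab file path is
# assumed to exist:
#
#   tok = TransfoXLTokenizer(vocab_file="vocab.txt")
#   tok.add_tokens(["new1"])           # appended at the end of the vocab
#   tok.move_added_token("new1", 1)    # relocated to id 1, no duplicate left
#   assert tok.encode("new1") == [1]
#   assert tok.decode([1]) == "new1"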
| 359
| 0
|
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
A__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowerCAmelCase ( __lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
'''The preprocess method is deprecated and will be removed in a future version. Please'''
''' use VaeImageProcessor.preprocess instead''' , __lowerCAmelCase , )
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
snake_case__ : int = [image]
if isinstance(image[0] , PIL.Image.Image ):
snake_case__ , snake_case__ : List[Any] = image[0].size
snake_case__ , snake_case__ : Any = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
snake_case__ : str = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) )[None, :] for i in image]
snake_case__ : str = np.concatenate(__lowerCAmelCase , axis=0 )
snake_case__ : Optional[Any] = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0
snake_case__ : Tuple = image.transpose(0 , 3 , 1 , 2 )
snake_case__ : Union[str, Any] = 2.0 * image - 1.0
snake_case__ : str = torch.from_numpy(__lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
snake_case__ : Tuple = torch.cat(__lowerCAmelCase , dim=0 )
return image
def _lowerCAmelCase ( __lowerCAmelCase ) -> Any:
"""simple docstring"""
if isinstance(__lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
snake_case__ : Union[str, Any] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
snake_case__ , snake_case__ : List[Any] = mask[0].size
snake_case__ , snake_case__ : Dict = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
snake_case__ : Any = [np.array(m.convert('''L''' ).resize((w, h) , resample=PIL_INTERPOLATION['''nearest'''] ) )[None, :] for m in mask]
snake_case__ : Union[str, Any] = np.concatenate(__lowerCAmelCase , axis=0 )
snake_case__ : int = mask.astype(np.floataa ) / 255.0
snake_case__ : int = 0
snake_case__ : Dict = 1
snake_case__ : List[str] = torch.from_numpy(__lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
snake_case__ : Dict = torch.cat(__lowerCAmelCase , dim=0 )
return mask
class a ( __lowerCamelCase ):
__lowerCAmelCase : UNetaDModel
__lowerCAmelCase : RePaintScheduler
def __init__( self :List[str] ,__lowercase :Dict ,__lowercase :Union[str, Any] ):
super().__init__()
self.register_modules(unet=__lowercase ,scheduler=__lowercase )
@torch.no_grad()
def __call__( self :str ,__lowercase :Union[torch.Tensor, PIL.Image.Image] ,__lowercase :Union[torch.Tensor, PIL.Image.Image] ,__lowercase :int = 2_5_0 ,__lowercase :float = 0.0 ,__lowercase :int = 1_0 ,__lowercase :int = 1_0 ,__lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowercase :Optional[str] = "pil" ,__lowercase :bool = True ,):
snake_case__ : Tuple = image
snake_case__ : Optional[Any] = _preprocess_image(__lowercase )
snake_case__ : str = original_image.to(device=self.device ,dtype=self.unet.dtype )
snake_case__ : List[str] = _preprocess_mask(__lowercase )
snake_case__ : Dict = mask_image.to(device=self.device ,dtype=self.unet.dtype )
snake_case__ : int = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(__lowercase ,__lowercase ) and len(__lowercase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowercase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
snake_case__ : Optional[int] = original_image.shape
snake_case__ : List[Any] = randn_tensor(__lowercase ,generator=__lowercase ,device=self.device ,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(__lowercase ,__lowercase ,__lowercase ,self.device )
snake_case__ : str = eta
snake_case__ : str = self.scheduler.timesteps[0] + 1
snake_case__ : List[Any] = generator[0] if isinstance(__lowercase ,__lowercase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
snake_case__ : List[str] = self.unet(__lowercase ,__lowercase ).sample
# compute previous image: x_t -> x_t-1
snake_case__ : Union[str, Any] = self.scheduler.step(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
snake_case__ : Optional[Any] = self.scheduler.undo_step(__lowercase ,__lowercase ,__lowercase )
snake_case__ : int = t
snake_case__ : Union[str, Any] = (image / 2 + 0.5).clamp(0 ,1 )
snake_case__ : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
snake_case__ : Dict = self.numpy_to_pil(__lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowercase )
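# A hedged usage sketch for the pipeline above (kept as comments since it
# downloads weights); the checkpoint id follows the published RePaint example
# and image/mask are assumed to be same-size PIL images where white pixels in
# the mask mark the region to keep:
#
#   import PIL.Image
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   original = PIL.Image.open("celeba_hq_256.png")
#   mask = PIL.Image.open("mask_256.png")
#   result = pipe(image=original, mask_image=mask, num_inference_steps=250,
#                 eta=0.0, jump_length=10, jump_n_sample=10).images[0]
#   result.save("inpainted.png")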
| 219
|
def dodecahedron_surface_area(edge: float) -> float:
    """simple docstring"""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """simple docstring"""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
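# Numeric sanity check for the closed forms above: for edge length 1, the
# surface area is 3*sqrt(25 + 10*sqrt(5)) ~= 20.6457 and the volume is
# (15 + 7*sqrt(5))/4 ~= 7.6631.
assert round(dodecahedron_surface_area(1), 4) == 20.6457
assert round(dodecahedron_volume(1), 4) == 7.6631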
| 219
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : int = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = "longformer"
def __init__( self : int , A_ : Union[List[int], int] = 512 , A_ : int = 2 , A_ : int = 1 , A_ : int = 0 , A_ : int = 2 , A_ : int = 30522 , A_ : int = 768 , A_ : int = 12 , A_ : int = 12 , A_ : int = 3072 , A_ : str = "gelu" , A_ : float = 0.1 , A_ : float = 0.1 , A_ : int = 512 , A_ : int = 2 , A_ : float = 0.02 , A_ : float = 1E-12 , A_ : bool = False , **A_ : str , ) -> int:
"""simple docstring"""
super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase_ = attention_window
lowerCamelCase_ = sep_token_id
lowerCamelCase_ = bos_token_id
lowerCamelCase_ = eos_token_id
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = onnx_export
class A( UpperCamelCase ):
'''simple docstring'''
def __init__( self : List[str] , A_ : "PretrainedConfig" , A_ : str = "default" , A_ : "List[PatchingSpec]" = None ) -> Dict:
"""simple docstring"""
super().__init__(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
lowerCamelCase_ = True
@property
def a__ ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if self.task == "multiple-choice":
lowerCamelCase_ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCamelCase_ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = super().outputs
if self.task == "default":
lowerCamelCase_ = {0: """batch"""}
return outputs
@property
def a__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return 1E-4
@property
def a__ ( self : Any ) -> List[str]:
"""simple docstring"""
return max(super().default_onnx_opset , 14 )
def a__ ( self : Any , A_ : "PreTrainedTokenizerBase" , A_ : int = -1 , A_ : int = -1 , A_ : bool = False , A_ : Optional[TensorType] = None , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = super().generate_dummy_inputs(
preprocessor=_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCamelCase_ = torch.zeros_like(inputs['input_ids'] )
# make every second token global
lowerCamelCase_ = 1
return inputs
| 70
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : int = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 519
| 0
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _lowercase ( a__ : dict ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def _lowercase ( a__ : np.ndarray , a__ : np.ndarray , a__ : np.ndarray ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(a__ , a__ )
# Predict target for test data
_UpperCamelCase = xgb.predict(a__ )
_UpperCamelCase = predictions.reshape(len(a__ ) , 1 )
return predictions
def _lowercase ( ) -> None:
"""simple docstring"""
_UpperCamelCase = fetch_california_housing()
_UpperCamelCase , _UpperCamelCase = data_handling(a__ )
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = train_test_split(
a__ , a__ , test_size=0.25 , random_state=1 )
_UpperCamelCase = xgboost(a__ , a__ , a__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(a__ , a__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(a__ , a__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 702
|
import os
from math import logaa
def _lowercase ( a__ : str = "base_exp.txt" ) -> int:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(a__ ) , a__ ) ) ):
_UpperCamelCase , _UpperCamelCase = list(map(a__ , line.split("," ) ) )
if x * logaa(a__ ) > largest:
_UpperCamelCase = x * logaa(a__ )
_UpperCamelCase = i + 1
return result
if __name__ == "__main__":
print(solution())
| 589
| 0
|
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
SCREAMING_SNAKE_CASE__ : List[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE__ : str = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 85
|
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self ):
"""simple docstring"""
snake_case_ :List[Any] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
snake_case_ :Dict = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
snake_case_ :int = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
snake_case_ :Tuple = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case_ :Any = model(a )["last_hidden_state"].detach()
self.assertEqual(output.shape , a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a , atol=1e-3 ) )
@slow
def _a ( self ):
"""simple docstring"""
snake_case_ :Dict = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
snake_case_ :List[str] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
snake_case_ :Any = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
snake_case_ :Tuple = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
snake_case_ :str = model(a )["last_hidden_state"].detach()
self.assertEqual(output.shape , a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , a , atol=1e-3 ) )
| 584
| 0
|
'''simple docstring'''
import os
def lowerCAmelCase_ ( ):
'''simple docstring'''
snake_case: List[Any] = os.path.dirname(os.path.realpath(__UpperCamelCase ) )
snake_case: int = os.path.join(__UpperCamelCase , 'triangle.txt' )
with open(__UpperCamelCase ) as f:
snake_case: Dict = f.readlines()
snake_case: Optional[Any] = []
for line in triangle:
snake_case: Optional[Any] = []
for number in line.strip().split(' ' ):
numbers_from_line.append(int(__UpperCamelCase ) )
a.append(__UpperCamelCase )
for i in range(1 , len(__UpperCamelCase ) ):
for j in range(len(a[i] ) ):
snake_case: Optional[Any] = a[i - 1][j] if j != len(a[i - 1] ) else 0
snake_case: List[Any] = a[i - 1][j - 1] if j > 0 else 0
a[i][j] += max(__UpperCamelCase , __UpperCamelCase )
return max(a[-1] )
if __name__ == "__main__":
print(solution())
| 711
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = "▁"
__UpperCAmelCase = {"vocab_file": "sentencepiece.bpe.model"}
__UpperCAmelCase = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
__UpperCAmelCase = {
"facebook/xglm-564M": 2_048,
}
class SCREAMING_SNAKE_CASE ( snake_case ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
'''simple docstring'''
snake_case: Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
snake_case: Optional[Any] = 7
snake_case: List[str] = [F"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
snake_case: str = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
snake_case: int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__ ) )
snake_case: int = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case: Tuple = 1
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case: Optional[Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
snake_case: Union[str, Any] = len(self.sp_model )
snake_case: str = {F"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE__ )
snake_case: Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
'''simple docstring'''
snake_case: List[Any] = self.__dict__.copy()
snake_case: Union[str, Any] = None
snake_case: Union[str, Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case: Union[str, Any] = {}
snake_case: Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
snake_case: Optional[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__ ))
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
snake_case: int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _UpperCamelCase ( self ):
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _UpperCamelCase ( self ):
'''simple docstring'''
snake_case: Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case: Dict = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
snake_case: Optional[Any] = ''.join(SCREAMING_SNAKE_CASE__ ).replace(SCREAMING_SNAKE_CASE__ , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
'''simple docstring'''
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case: List[str] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , 'wb' ) as fi:
snake_case: int = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
| 692
| 0
|
from math import sqrt
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (
number >= 0
), "'number' must been an int and positive"
__lowerCAmelCase = True
# 0 and 1 are none primes.
if number <= 1:
__lowerCAmelCase = False
for divisor in range(2 , int(round(sqrt(A__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
__lowerCAmelCase = False
break
# precondition
assert isinstance(A__ , A__ ), "'status' must been from type bool"
return status
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
__lowerCAmelCase = list(range(2 , n + 1 ) )
__lowerCAmelCase = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(A__ ) ):
for j in range(i + 1 , len(A__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
__lowerCAmelCase = 0
# filters actual prime numbers.
__lowerCAmelCase = [x for x in begin_list if x != 0]
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type list"
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (n > 2), "'N' must been an int and > 2"
__lowerCAmelCase = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(A__ ):
ans.append(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type list"
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and number >= 0, "'number' must been an int and >= 0"
__lowerCAmelCase = [] # this list will be returns of the function.
# potential prime number factors.
__lowerCAmelCase = 2
__lowerCAmelCase = number
if number == 0 or number == 1:
ans.append(A__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(A__ ):
while quotient != 1:
if is_prime(A__ ) and (quotient % factor == 0):
ans.append(A__ )
quotient /= factor
else:
factor += 1
else:
ans.append(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type list"
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(A__ )
__lowerCAmelCase = max(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type int"
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
__lowerCAmelCase = 0
# prime factorization of 'number'
__lowerCAmelCase = prime_factorization(A__ )
__lowerCAmelCase = min(A__ )
# precondition
assert isinstance(A__ , A__ ), "'ans' must been from type int"
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ), "'number' must been an int"
assert isinstance(number % 2 == 0 , A__ ), "compare bust been from type bool"
return number % 2 == 0
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ), "'number' must been an int"
assert isinstance(number % 2 != 0 , A__ ), "compare bust been from type bool"
return number % 2 != 0
def __lowerCAmelCase ( __snake_case ):
assert (
isinstance(A__ , A__ ) and (number > 2) and is_even(A__ )
), "'number' must been an int, even and > 2"
__lowerCAmelCase = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
__lowerCAmelCase = get_prime_numbers(A__ )
__lowerCAmelCase = len(A__ )
# run variable for while-loops.
__lowerCAmelCase = 0
__lowerCAmelCase = None
# exit variable. for break up the loops
__lowerCAmelCase = True
while i < len_pn and loop:
__lowerCAmelCase = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
__lowerCAmelCase = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(A__ , A__ )
and (len(A__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def __lowerCAmelCase ( __snake_case , __snake_case ):
assert (
isinstance(A__ , A__ )
and isinstance(A__ , A__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 0
while numbera != 0:
__lowerCAmelCase = numbera % numbera
__lowerCAmelCase = numbera
__lowerCAmelCase = rest
# precondition
assert isinstance(A__ , A__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __lowerCAmelCase ( __snake_case , __snake_case ):
assert (
isinstance(A__ , A__ )
and isinstance(A__ , A__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
__lowerCAmelCase = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
__lowerCAmelCase = prime_factorization(A__ )
__lowerCAmelCase = prime_factorization(A__ )
elif numbera == 1 or numbera == 1:
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = max(A__ , A__ )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
__lowerCAmelCase = prime_fac_a.count(A__ )
__lowerCAmelCase = prime_fac_a.count(A__ )
for _ in range(max(A__ , A__ ) ):
ans *= n
else:
__lowerCAmelCase = prime_fac_a.count(A__ )
for _ in range(A__ ):
ans *= n
done.append(A__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
__lowerCAmelCase = prime_fac_a.count(A__ )
for _ in range(A__ ):
ans *= n
done.append(A__ )
# precondition
assert isinstance(A__ , A__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (n >= 0), "'number' must been a positive int"
__lowerCAmelCase = 0
__lowerCAmelCase = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(A__ ):
ans += 1
# precondition
assert isinstance(A__ , A__ ) and is_prime(
A__ ), "'ans' must been a prime number and from type int"
return ans
def __lowerCAmelCase ( __snake_case , __snake_case ):
assert (
is_prime(A__ ) and is_prime(A__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
__lowerCAmelCase = p_number_a + 1 # jump to the next number
__lowerCAmelCase = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(A__ ):
number += 1
while number < p_number_a:
ans.append(A__ )
number += 1
# fetch the next prime number.
while not is_prime(A__ ):
number += 1
# precondition
assert (
isinstance(A__ , A__ )
and ans[0] != p_number_a
and ans[len(A__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (n >= 1), "'n' must been int and >= 1"
__lowerCAmelCase = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(A__ )
# precondition
assert ans[0] == 1 and ans[len(A__ ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (
number > 1
), "'number' must been an int and >= 1"
__lowerCAmelCase = get_divisors(A__ )
# precondition
assert (
isinstance(A__ , A__ )
and (divisors[0] == 1)
and (divisors[len(A__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def __lowerCAmelCase ( __snake_case , __snake_case ):
assert (
isinstance(A__ , A__ )
and isinstance(A__ , A__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
__lowerCAmelCase = gcd(abs(A__ ) , abs(A__ ) )
# precondition
assert (
isinstance(A__ , A__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (n >= 0), "'n' must been a int and >= 0"
__lowerCAmelCase = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __lowerCAmelCase ( __snake_case ):
assert isinstance(A__ , A__ ) and (n >= 0), "'n' must been an int and >= 0"
__lowerCAmelCase = 0
__lowerCAmelCase = 1
__lowerCAmelCase = 1 # this will be return
for _ in range(n - 1 ):
__lowerCAmelCase = ans
ans += fiba
__lowerCAmelCase = tmp
return ans
| 367
|
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class UpperCAmelCase__( lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
A : Optional[int] = CpmAntTokenizer
A : Optional[int] = False
def UpperCAmelCase ( self : List[str]) -> str:
"""simple docstring"""
super().setUp()
lowercase__ = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
lowercase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
@tooslow
def UpperCAmelCase ( self : str) -> Optional[Any]:
"""simple docstring"""
lowercase__ = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
lowercase__ = '今天天气真好!'
lowercase__ = ['今天', '天气', '真', '好', '!']
lowercase__ = tokenizer.tokenize(lowerCAmelCase)
self.assertListEqual(lowerCAmelCase , lowerCAmelCase)
lowercase__ = '今天天气真好!'
lowercase__ = [tokenizer.bos_token] + tokens
lowercase__ = [6, 98_02, 1_49_62, 20_82, 8_31, 2_44]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase) , lowerCAmelCase)
lowercase__ = tokenizer.decode(lowerCAmelCase)
self.assertEqual(lowerCAmelCase , lowerCAmelCase)
| 622
| 0
|
'''simple docstring'''
def snake_case__ ( _A: int ) -> str:
'''simple docstring'''
lowerCAmelCase = int(_A )
if decimal in (0, 1): # Exit cases for the recursion
return str(_A )
lowerCAmelCase , lowerCAmelCase = divmod(_A , 2 )
return binary_recursive(_A ) + str(_A )
def snake_case__ ( _A: str ) -> str:
'''simple docstring'''
lowerCAmelCase = str(_A ).strip()
if not number:
raise ValueError("""No input value was provided""" )
lowerCAmelCase = """-""" if number.startswith("""-""" ) else """"""
lowerCAmelCase = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return f"{negative}0b{binary_recursive(int(_A ) )}"
if __name__ == "__main__":
from doctest import testmod
testmod()
| 605
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__lowercase = logging.getLogger(__name__)
class a__( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = '''sequence-classification'''
def __init__( self , __lowerCAmelCase):
"""simple docstring"""
if type(__lowerCAmelCase) == dict:
lowerCAmelCase = Namespace(**__lowerCAmelCase)
lowerCAmelCase = glue_output_modes[hparams.task]
lowerCAmelCase = glue_tasks_num_labels[hparams.task]
super().__init__(__lowerCAmelCase , __lowerCAmelCase , self.mode)
def a_ ( self , **__lowerCAmelCase):
"""simple docstring"""
return self.model(**__lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase = outputs[0]
lowerCAmelCase = self.trainer.lr_schedulers[0]["""scheduler"""]
lowerCAmelCase = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def a_ ( self):
"""simple docstring"""
lowerCAmelCase = self.hparams
lowerCAmelCase = processors[args.task]()
lowerCAmelCase = processor.get_labels()
for mode in ["train", "dev"]:
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
if os.path.exists(__lowerCAmelCase) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir)
lowerCAmelCase = (
processor.get_dev_examples(args.data_dir)
if mode == """dev"""
else processor.get_train_examples(args.data_dir)
)
lowerCAmelCase = convert_examples_to_features(
__lowerCAmelCase , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase)
torch.save(__lowerCAmelCase , __lowerCAmelCase)
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False):
"""simple docstring"""
lowerCAmelCase = """dev""" if mode == """test""" else mode
lowerCAmelCase = self._feature_file(__lowerCAmelCase)
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase)
lowerCAmelCase = torch.load(__lowerCAmelCase)
lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float)
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase) , batch_size=__lowerCAmelCase , shuffle=__lowerCAmelCase , )
def a_ ( self , __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCAmelCase = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
lowerCAmelCase = self(**__lowerCAmelCase)
lowerCAmelCase , lowerCAmelCase = outputs[:2]
lowerCAmelCase = logits.detach().cpu().numpy()
lowerCAmelCase = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase = torch.stack([x["""val_loss"""] for x in outputs]).mean().detach().cpu().item()
lowerCAmelCase = np.concatenate([x["""pred"""] for x in outputs] , axis=0)
if self.hparams.glue_output_mode == "classification":
lowerCAmelCase = np.argmax(__lowerCAmelCase , axis=1)
elif self.hparams.glue_output_mode == "regression":
lowerCAmelCase = np.squeeze(__lowerCAmelCase)
lowerCAmelCase = np.concatenate([x["""target"""] for x in outputs] , axis=0)
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0])]
lowerCAmelCase = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , __lowerCAmelCase , __lowerCAmelCase)}
lowerCAmelCase = dict(results.items())
lowerCAmelCase = results
return ret, preds_list, out_label_list
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def a_ ( self , __lowerCAmelCase):
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = self._eval_end(__lowerCAmelCase)
lowerCAmelCase = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def a_ ( __lowerCAmelCase , __lowerCAmelCase):
"""simple docstring"""
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase)
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--task""" , default="""""" , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""The GLUE task to run""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
return parser
def snake_case__ ( ) -> str:
'''simple docstring'''
lowerCAmelCase = argparse.ArgumentParser()
add_generic_args(_A , os.getcwd() )
lowerCAmelCase = GLUETransformer.add_model_specific_args(_A , os.getcwd() )
lowerCAmelCase = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCAmelCase = os.path.join(
"""./results""" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
lowerCAmelCase = GLUETransformer(_A )
lowerCAmelCase = generic_train(_A , _A )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """checkpoint-epoch=*.ckpt""" ) , recursive=_A ) )
lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_A )
if __name__ == "__main__":
main()
| 605
| 1
|
'''simple docstring'''
import re
from ..utils import cached_file
# docstyle-ignore
lowerCAmelCase : int = """\nHuman: <<task>>\n\nAssistant: """
lowerCAmelCase : Optional[Any] = """huggingface-tools/default-prompts"""
lowerCAmelCase : Optional[Any] = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""}
def lowercase (_A , _A , _A="run" ):
"""simple docstring"""
if prompt_or_repo_id is None:
_lowerCAmelCase : List[Any] = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('\\s' , _snake_case ) is not None:
return prompt_or_repo_id
_lowerCAmelCase : Dict = cached_file(
_snake_case , PROMPT_FILES[mode] , repo_type='dataset' , user_agent={'agent': agent_name} )
with open(_snake_case , 'r' , encoding='utf-8' ) as f:
return f.read()
| 444
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
SCREAMING_SNAKE_CASE__ = None
try:
import msvcrt
except ImportError:
SCREAMING_SNAKE_CASE__ = None
try:
import fcntl
except ImportError:
SCREAMING_SNAKE_CASE__ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
SCREAMING_SNAKE_CASE__ = OSError
# Data
# ------------------------------------------------
SCREAMING_SNAKE_CASE__ = [
"Timeout",
"BaseFileLock",
"WindowsFileLock",
"UnixFileLock",
"SoftFileLock",
"FileLock",
]
SCREAMING_SNAKE_CASE__ = "3.0.12"
SCREAMING_SNAKE_CASE__ = None
def lowerCamelCase ( ):
'''simple docstring'''
global _logger
lowercase__ = _logger or logging.getLogger(__name__ )
return _logger
class snake_case (UpperCamelCase ):
def __init__( self ,UpperCAmelCase_ ) -> int:
lowercase__ = lock_file
return None
def __str__( self ) -> Union[str, Any]:
lowercase__ = F'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class snake_case :
def __init__( self ,UpperCAmelCase_ ) -> List[Any]:
lowercase__ = lock
return None
def __enter__( self ) -> Optional[int]:
return self.lock
def __exit__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> int:
self.lock.release()
return None
class snake_case :
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=-1 ,UpperCAmelCase_=None ) -> Tuple:
lowercase__ = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowercase__ = self.hash_filename_if_too_long(UpperCAmelCase_ ,UpperCAmelCase_ )
# The path to the lock file.
lowercase__ = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowercase__ = None
# The default timeout value.
lowercase__ = timeout
# We use this lock primarily for the lock counter.
lowercase__ = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowercase__ = 0
return None
@property
def _a ( self ) -> List[str]:
return self._lock_file
@property
def _a ( self ) -> Optional[int]:
return self._timeout
@timeout.setter
def _a ( self ,UpperCAmelCase_ ) -> Optional[Any]:
lowercase__ = float(UpperCAmelCase_ )
return None
def _a ( self ) -> Optional[Any]:
raise NotImplementedError()
def _a ( self ) -> Optional[int]:
raise NotImplementedError()
@property
def _a ( self ) -> Dict:
return self._lock_file_fd is not None
def _a ( self ,UpperCAmelCase_=None ,UpperCAmelCase_=0.05 ) -> Optional[Any]:
# Use the default timeout, if no timeout is provided.
if timeout is None:
lowercase__ = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowercase__ = id(self )
lowercase__ = self._lock_file
lowercase__ = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(UpperCAmelCase_ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowercase__ = max(0 ,self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _a ( self ,UpperCAmelCase_=False ) -> List[Any]:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowercase__ = id(self )
lowercase__ = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
lowercase__ = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self ) -> Dict:
self.acquire()
return self
def __exit__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> int:
self.release()
return None
def __del__( self ) -> Union[str, Any]:
self.release(force=UpperCAmelCase_ )
return None
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_ ) -> str:
lowercase__ = os.path.basename(UpperCAmelCase_ )
if len(UpperCAmelCase_ ) > max_length and max_length > 0:
lowercase__ = os.path.dirname(UpperCAmelCase_ )
lowercase__ = str(hash(UpperCAmelCase_ ) )
lowercase__ = filename[: max_length - len(UpperCAmelCase_ ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(UpperCAmelCase_ ,UpperCAmelCase_ )
else:
return path
class snake_case (UpperCamelCase ):
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=-1 ,UpperCAmelCase_=None ) -> Dict:
from .file_utils import relative_to_absolute_path
super().__init__(UpperCAmelCase_ ,timeout=UpperCAmelCase_ ,max_filename_length=UpperCAmelCase_ )
lowercase__ = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def _a ( self ) -> List[str]:
lowercase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
lowercase__ = os.open(self._lock_file ,UpperCAmelCase_ )
except OSError:
pass
else:
try:
msvcrt.locking(UpperCAmelCase_ ,msvcrt.LK_NBLCK ,1 )
except OSError:
os.close(UpperCAmelCase_ )
else:
lowercase__ = fd
return None
def _a ( self ) -> Any:
lowercase__ = self._lock_file_fd
lowercase__ = None
msvcrt.locking(UpperCAmelCase_ ,msvcrt.LK_UNLCK ,1 )
os.close(UpperCAmelCase_ )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class snake_case (UpperCamelCase ):
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=-1 ,UpperCAmelCase_=None ) -> int:
lowercase__ = os.statvfs(os.path.dirname(UpperCAmelCase_ ) ).f_namemax
super().__init__(UpperCAmelCase_ ,timeout=UpperCAmelCase_ ,max_filename_length=UpperCAmelCase_ )
def _a ( self ) -> List[str]:
lowercase__ = os.O_RDWR | os.O_CREAT | os.O_TRUNC
lowercase__ = os.open(self._lock_file ,UpperCAmelCase_ )
try:
fcntl.flock(UpperCAmelCase_ ,fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(UpperCAmelCase_ )
else:
lowercase__ = fd
return None
def _a ( self ) -> int:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
lowercase__ = self._lock_file_fd
lowercase__ = None
fcntl.flock(UpperCAmelCase_ ,fcntl.LOCK_UN )
os.close(UpperCAmelCase_ )
return None
class snake_case (UpperCamelCase ):
def _a ( self ) -> Optional[Any]:
lowercase__ = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
lowercase__ = os.open(self._lock_file ,UpperCAmelCase_ )
except OSError:
pass
else:
lowercase__ = fd
return None
def _a ( self ) -> Tuple:
os.close(self._lock_file_fd )
lowercase__ = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
SCREAMING_SNAKE_CASE__ = None
if msvcrt:
SCREAMING_SNAKE_CASE__ = WindowsFileLock
elif fcntl:
SCREAMING_SNAKE_CASE__ = UnixFileLock
else:
SCREAMING_SNAKE_CASE__ = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 267
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ ={'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =[
'OPT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OPTForCausalLM',
'OPTModel',
'OPTPreTrainedModel',
'OPTForSequenceClassification',
'OPTForQuestionAnswering',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ =[
'FlaxOPTForCausalLM',
'FlaxOPTModel',
'FlaxOPTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
lowercase__ =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 719
|
from collections import deque
class UpperCamelCase__ :
def __init__(self : str , snake_case_ : str , snake_case_ : int , snake_case_ : int ):
__a : Optional[Any] = process_name # process name
__a : Optional[Any] = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__a : Union[str, Any] = arrival_time
__a : int = burst_time # remaining burst time
__a : Dict = 0 # total time of the process wait in ready queue
__a : Union[str, Any] = 0 # time from arrival time to completion time
class UpperCamelCase__ :
def __init__(self : Optional[Any] , snake_case_ : int , snake_case_ : list[int] , snake_case_ : deque[Process] , snake_case_ : int , ):
# total number of mlfq's queues
__a : Tuple = number_of_queues
# time slice of queues that round robin algorithm applied
__a : Optional[int] = time_slices
# unfinished process is in this ready_queue
__a : Optional[int] = queue
# current time
__a : List[Any] = current_time
# finished process is in this sequence queue
__a : deque[Process] = deque()
def lowerCAmelCase (self : Dict ):
__a : Tuple = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowerCAmelCase (self : List[Any] , snake_case_ : list[Process] ):
__a : Optional[int] = []
for i in range(len(snake_case_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowerCAmelCase (self : Optional[Any] , snake_case_ : list[Process] ):
__a : Optional[int] = []
for i in range(len(snake_case_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowerCAmelCase (self : Optional[Any] , snake_case_ : list[Process] ):
__a : Any = []
for i in range(len(snake_case_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowerCAmelCase (self : List[Any] , snake_case_ : deque[Process] ):
return [q.burst_time for q in queue]
def lowerCAmelCase (self : int , snake_case_ : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowerCAmelCase (self : Tuple , snake_case_ : deque[Process] ):
__a : deque[Process] = deque() # sequence deque of finished process
while len(snake_case_ ) != 0:
__a : Any = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(snake_case_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__a : Dict = 0
# set the process's turnaround time because it is finished
__a : Tuple = self.current_time - cp.arrival_time
# set the completion time
__a : Dict = self.current_time
# add the process to queue that has finished queue
finished.append(snake_case_ )
self.finish_queue.extend(snake_case_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowerCAmelCase (self : List[Any] , snake_case_ : deque[Process] , snake_case_ : int ):
__a : deque[Process] = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(snake_case_ ) ):
__a : Optional[Any] = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(snake_case_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__a : Dict = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(snake_case_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__a : Dict = 0
# set the finish time
__a : Union[str, Any] = self.current_time
# update the process' turnaround time because it is finished
__a : List[Any] = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(snake_case_ )
self.finish_queue.extend(snake_case_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowerCAmelCase (self : Optional[Any] ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__a , __a : str = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
lowercase__ =Process('P1', 0, 53)
lowercase__ =Process('P2', 0, 17)
lowercase__ =Process('P3', 0, 68)
lowercase__ =Process('P4', 0, 24)
lowercase__ =3
lowercase__ =[17, 25]
lowercase__ =deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'queue': deque([Pa, Pa, Pa, Pa])})
lowercase__ =Process('P1', 0, 53)
lowercase__ =Process('P2', 0, 17)
lowercase__ =Process('P3', 0, 68)
lowercase__ =Process('P4', 0, 24)
lowercase__ =3
lowercase__ =[17, 25]
lowercase__ =deque([Pa, Pa, Pa, Pa])
lowercase__ =MLFQ(number_of_queues, time_slices, queue, 0)
lowercase__ =mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 326
| 0
|
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
try:
_a : Any = float(UpperCamelCase__ )
except ValueError:
raise ValueError("""Please enter a valid number""" )
_a : Any = decimal - int(UpperCamelCase__ )
if fractional_part == 0:
return int(UpperCamelCase__ ), 1
else:
_a : Any = len(str(UpperCamelCase__ ).split(""".""" )[1] )
_a : Dict = int(decimal * (1_0**number_of_frac_digits) )
_a : Dict = 1_0**number_of_frac_digits
_a , _a : Union[str, Any] = denominator, numerator
while True:
_a : Union[str, Any] = dividend % divisor
if remainder == 0:
break
_a , _a : Union[str, Any] = divisor, remainder
_a , _a : str = numerator / divisor, denominator / divisor
return int(UpperCamelCase__ ), int(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
| 389
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
return (
num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den
)
def lowerCAmelCase__ ( UpperCamelCase__ ):
'''simple docstring'''
_a : Optional[Any] = []
_a : str = 1_1
_a : List[Any] = int("""1""" + """0""" * digit_len )
for num in range(UpperCamelCase__ , UpperCamelCase__ ):
while den <= 9_9:
if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0):
if is_digit_cancelling(UpperCamelCase__ , UpperCamelCase__ ):
solutions.append(F"""{num}/{den}""" )
den += 1
num += 1
_a : Dict = 1_0
return solutions
def lowerCAmelCase__ ( UpperCamelCase__ = 2 ):
'''simple docstring'''
_a : Optional[int] = 1.0
for fraction in fraction_list(UpperCamelCase__ ):
_a : List[Any] = Fraction(UpperCamelCase__ )
result *= frac.denominator / frac.numerator
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(solution())
| 389
| 1
|
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
lowerCAmelCase__ : int = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a named tensor (parameter or buffer) of `module` on `device`, quantizing it
    with bitsandbytes when the parameter is a bnb `Int8Params`/`Params4bit`. A plain
    `param.to(device)` would create a new tensor no longer linked to the parameter."""
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2")
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.")
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
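# Illustrative call pattern (the module path and `cpu_weight` are examples for this
# sketch, not values defined in this file): this moves, and for bnb parameter
# classes also quantizes, a single named weight onto GPU 0.
#
#     set_module_quantized_tensor_to_device(model, "transformer.h.0.mlp.c_fc.weight", 0, value=cpu_weight)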
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features, out_features, module.bias is not None, has_fp16_weights=quantization_config.llm_int8_has_fp16_weight, threshold=quantization_config.llm_int8_threshold, )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features, out_features, module.bias is not None, quantization_config.bnb_4bit_compute_dtype, compress_statistics=quantization_config.bnb_4bit_use_double_quant, quant_type=quantization_config.bnb_4bit_quant_type, )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module, modules_to_not_convert, current_key_name, quantization_config, has_been_replaced=has_been_replaced, )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug.")
    return model
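# A minimal usage sketch (the checkpoint name is an example; `BitsAndBytesConfig`
# is the public transformers quantization config):
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     config = BitsAndBytesConfig(load_in_8bit=True)
#     model = AutoModelForCausalLM.from_pretrained("gpt2")
#     model = replace_with_bnb_linear(model, quantization_config=config)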
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead", FutureWarning, )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead", FutureWarning, )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check which parameters are tied.
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
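# For a typical causal LM whose input embeddings are tied to the output projection,
# this returns something like ["lm_head"], which is why `replace_with_bnb_linear`
# above defaults `modules_to_not_convert` to ["lm_head"].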
| 712
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"})
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"})
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config, )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
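# Illustrative invocation (the script file name and checkpoint names are
# assumptions for this sketch; the flag names follow the dataclass fields above):
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2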
| 699
| 0
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'_TestCommandArgs',
[
'dataset',
'name',
'cache_dir',
'data_dir',
'all_configs',
'save_infos',
'ignore_verifications',
'force_redownload',
'clear_cache',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """Return True if `source` is within 1% of `target`."""
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }), splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ], download_size=3940680, dataset_size=2589981, )
        })
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
| 139
|
"""simple docstring"""
import itertools
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using trial division over 6k +/- 1 candidates."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
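# Why 6k +/- 1 suffices: every integer can be written as 6k + r with r in 0..5;
# r in {0, 2, 3, 4} makes the number divisible by 2 or 3, so any prime > 3 has
# r in {1, 5}. Stepping through i and i + 2 in increments of 6 therefore covers
# every candidate divisor.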
def prime_generator():
    """Yield prime numbers in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the `nth` prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
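# For reference (Project Euler 7), the 10001st prime is 104743, so the default
# solution() returns 104743.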
if __name__ == "__main__":
print(F"{solution() = }")
| 139
| 1
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 704
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''unc-nlp/lxmert-base-uncased''': (
'''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''unc-nlp/lxmert-base-uncased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''unc-nlp/lxmert-base-uncased''': {'''do_lower_case''': True},
}
class LxmertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LxmertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
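# Minimal usage sketch (checkpoint name taken from the map above; the input
# strings are illustrative):
#
#     tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
#     enc = tokenizer("a picture of a cat", "what is shown?")
#     # enc["token_type_ids"] marks the first segment with 0s and the second with
#     # 1s, matching create_token_type_ids_from_sequences above.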
| 143
| 0
|
"""simple docstring"""
def lowercase__(A ) ->list:
"""simple docstring"""
def merge(A , A ) -> list:
def _merge():
while left and right:
yield (left if left[0] <= right[0] else right).pop(0 )
yield from left
yield from right
return list(_merge() )
if len(__lowercase ) <= 1:
return collection
lowercase__ : Union[str, Any]= len(__lowercase ) // 2
return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
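# Example: merge_sort([5, 3, 1, 4]) returns [1, 3, 4, 5]. The list is halved at
# each recursion level, giving the usual O(n log n) comparisons and O(n) extra
# space for the merged output.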
if __name__ == "__main__":
import doctest
doctest.testmod()
a : int = input("""Enter numbers separated by a comma:\n""").strip()
a : Any = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 218
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 670
| 0
|
"""Jump search on a sorted list."""
import math
def jump_search(arr: list, x: int) -> int:
    """Return the index of `x` in the sorted list `arr`, or -1 if it is absent."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
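# jump_search assumes `arr` is sorted ascending. With a block size of sqrt(n) it
# needs O(sqrt(n)) comparisons; for example, jump_search([0, 1, 3, 5, 8, 13], 8)
# returns 4.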
if __name__ == "__main__":
lowerCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(',')]
lowerCAmelCase__ = int(input('Enter the number to be searched:\n'))
lowerCAmelCase__ = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(F"Number {x} is at index {res}")
| 719
|
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any `k` consecutive elements of `array`."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
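# Sliding-window intuition: each step drops array[i] and adds array[i + k], so the
# window sum updates in O(1) and the whole scan is O(n). For example,
# max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0], k=4) == 18 (the window 4 + 2 + 10 + 2).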
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowerCAmelCase__ = [randint(-1_000, 1_000) for i in range(100)]
lowerCAmelCase__ = randint(0, 110)
print(F"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 576
| 0
|
from jiwer import compute_measures
import datasets
_UpperCAmelCase : Optional[int] = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_UpperCAmelCase : Optional[Any] = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_UpperCAmelCase : Tuple = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> wer = datasets.load_metric(\"wer\")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ], )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
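# Worked example matching the docstring above: "this is the prediction" vs
# "this is the reference" is 1 substitution over 4 reference words; for
# "there is an other sample" vs "there is another one", one possible alignment
# gives 2 substitutions plus 1 insertion over 4 reference words.
# WER = (1 + 3) / (4 + 4) = 0.5.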
| 668
|
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    """Parse a ProteinNet-format string into a `Protein`."""
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq])
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None, )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    """Build PDB header lines (REMARK / PARENT) for a `Protein`."""
    pdb_headers = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add PDB headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Convert a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Compute an ideal atom mask from the residue types alone."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(features: FeatureDict, result: ModelOutput, b_factors: Optional[np.ndarray] = None, chain_index: Optional[np.ndarray] = None, remark: Optional[str] = None, parents: Optional[Sequence[str]] = None, parents_chain_index: Optional[Sequence[int]] = None, ) -> Protein:
    """Assemble a `Protein` from model features and a model output dictionary."""
    return Protein(
        aatype=features["aatype"], atom_positions=result["final_atom_positions"], atom_mask=result["final_atom_mask"], residue_index=features["residue_index"] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index, )
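# Typical flow (a sketch; `features` and `result` come from an AlphaFold-style
# model and are assumptions here, not defined in this module):
#
#     prot = from_prediction(features, result)
#     with open("prediction.pdb", "w") as f:
#         f.write(to_pdb(prot))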
| 668
| 1
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)})  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets)

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
| 705
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 64
| 0
|
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
if isinstance(SCREAMING_SNAKE_CASE , torch.Tensor ):
return image
elif isinstance(SCREAMING_SNAKE_CASE , PIL.Image.Image ):
_lowercase : List[Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_lowercase : Union[str, Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
_lowercase : Dict = np.concatenate(SCREAMING_SNAKE_CASE , axis=0 )
_lowercase : Dict = np.array(SCREAMING_SNAKE_CASE ).astype(np.floataa ) / 255.0
_lowercase : Any = image.transpose(0 , 3 , 1 , 2 )
_lowercase : Optional[int] = 2.0 * image - 1.0
_lowercase : Union[str, Any] = torch.from_numpy(SCREAMING_SNAKE_CASE )
elif isinstance(image[0] , torch.Tensor ):
_lowercase : Optional[Any] = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
return image
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0.9995 ) -> str:
if not isinstance(SCREAMING_SNAKE_CASE , np.ndarray ):
_lowercase : Optional[Any] = True
_lowercase : Any = va.device
_lowercase : str = va.cpu().numpy()
_lowercase : str = va.cpu().numpy()
_lowercase : Tuple = np.sum(va * va / (np.linalg.norm(SCREAMING_SNAKE_CASE ) * np.linalg.norm(SCREAMING_SNAKE_CASE )) )
if np.abs(SCREAMING_SNAKE_CASE ) > DOT_THRESHOLD:
_lowercase : Dict = (1 - t) * va + t * va
else:
_lowercase : Any = np.arccos(SCREAMING_SNAKE_CASE )
_lowercase : Dict = np.sin(SCREAMING_SNAKE_CASE )
_lowercase : Any = theta_a * t
_lowercase : Optional[Any] = np.sin(SCREAMING_SNAKE_CASE )
_lowercase : Union[str, Any] = np.sin(theta_a - theta_t ) / sin_theta_a
_lowercase : List[str] = sin_theta_t / sin_theta_a
_lowercase : Optional[Any] = sa * va + sa * va
if inputs_are_torch:
_lowercase : Optional[Any] = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
return va
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
_lowercase : Any = F.normalize(SCREAMING_SNAKE_CASE , dim=-1 )
_lowercase : Union[str, Any] = F.normalize(SCREAMING_SNAKE_CASE , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def __magic_name__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
for param in model.parameters():
_lowercase : List[str] = value
class lowerCAmelCase_ ( __snake_case ):
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , ):
super().__init__()
self.register_modules(
vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , clip_model=_lowerCAmelCase , tokenizer=_lowerCAmelCase , unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , coca_model=_lowerCAmelCase , coca_tokenizer=_lowerCAmelCase , coca_transform=_lowerCAmelCase , )
_lowercase : str = (
feature_extractor.size
if isinstance(feature_extractor.size , _lowerCAmelCase )
else feature_extractor.size['shortest_edge']
)
_lowercase : List[str] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _lowerCAmelCase )
set_requires_grad(self.clip_model , _lowerCAmelCase )
def __a ( self , _lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowercase : int = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowerCAmelCase )
def __a ( self ):
self.enable_attention_slicing(_lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.vae , _lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.vae , _lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.unet , _lowerCAmelCase )
def __a ( self ):
set_requires_grad(self.unet , _lowerCAmelCase )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# get the original timestep using init_timestep
_lowercase : Union[str, Any] = min(int(num_inference_steps * strength ) , _lowerCAmelCase )
_lowercase : List[Any] = max(num_inference_steps - init_timestep , 0 )
_lowercase : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None ):
if not isinstance(_lowerCAmelCase , torch.Tensor ):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(_lowerCAmelCase )}""" )
_lowercase : Tuple = image.to(device=_lowerCAmelCase , dtype=_lowerCAmelCase )
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
_lowercase : int = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowerCAmelCase )
]
_lowercase : int = torch.cat(_lowerCAmelCase , dim=0 )
else:
_lowercase : List[str] = self.vae.encode(_lowerCAmelCase ).latent_dist.sample(_lowerCAmelCase )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowercase : Optional[int] = 0.1_82_15 * init_latents
_lowercase : int = init_latents.repeat_interleave(_lowerCAmelCase , dim=0 )
_lowercase : Union[str, Any] = randn_tensor(init_latents.shape , generator=_lowerCAmelCase , device=_lowerCAmelCase , dtype=_lowerCAmelCase )
# get latents
_lowercase : Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = init_latents
return latents
def __a ( self , _lowerCAmelCase ):
_lowercase : Dict = self.coca_transform(_lowerCAmelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_lowercase : List[str] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_lowercase : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def __a ( self , _lowerCAmelCase , _lowerCAmelCase ):
_lowercase : Any = self.feature_extractor.preprocess(_lowerCAmelCase )
_lowercase : Any = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_lowercase : int = self.clip_model.get_image_features(_lowerCAmelCase )
_lowercase : Any = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
_lowercase : List[Any] = image_embeddings_clip.repeat_interleave(_lowerCAmelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __a ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
_lowercase : Optional[Any] = latents.detach().requires_grad_()
_lowercase : Any = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
_lowercase : Union[str, Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_lowercase : List[str] = self.scheduler.alphas_cumprod[timestep]
_lowercase : Any = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowercase : Dict = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowercase : Any = torch.sqrt(_lowerCAmelCase )
_lowercase : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _lowerCAmelCase ):
_lowercase : Optional[Any] = self.scheduler.sigmas[index]
_lowercase : Optional[int] = latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler )} not supported""" )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowercase : Tuple = 1 / 0.1_82_15 * sample
_lowercase : Union[str, Any] = self.vae.decode(_lowerCAmelCase ).sample
_lowercase : Any = (image / 2 + 0.5).clamp(0 , 1 )
_lowercase : str = transforms.Resize(self.feature_extractor_size )(_lowerCAmelCase )
_lowercase : Union[str, Any] = self.normalize(_lowerCAmelCase ).to(latents.dtype )
_lowercase : Optional[Any] = self.clip_model.get_image_features(_lowerCAmelCase )
_lowercase : Optional[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_lowerCAmelCase )
_lowercase : Optional[Any] = spherical_dist_loss(_lowerCAmelCase , _lowerCAmelCase ).mean() * clip_guidance_scale
_lowercase : Tuple = -torch.autograd.grad(_lowerCAmelCase , _lowerCAmelCase )[0]
if isinstance(self.scheduler , _lowerCAmelCase ):
_lowercase : List[Any] = latents.detach() + grads * (sigma**2)
_lowercase : Union[str, Any] = noise_pred_original
else:
_lowercase : int = noise_pred_original - torch.sqrt(_lowerCAmelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 5_1_2 , _lowerCAmelCase = 0.6 , _lowerCAmelCase = 5_0 , _lowerCAmelCase = 7.5 , _lowerCAmelCase = 1 , _lowerCAmelCase = 0.0 , _lowerCAmelCase = 1_0_0 , _lowerCAmelCase = None , _lowerCAmelCase = "pil" , _lowerCAmelCase = True , _lowerCAmelCase = 0.8 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = 0.1 , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and len(_lowerCAmelCase ) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(_lowerCAmelCase )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(_lowerCAmelCase , torch.Generator ) and batch_size > 1:
_lowercase : List[Any] = [generator] + [None] * (batch_size - 1)
_lowercase : List[Any] = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
_lowercase : Dict = [x[0] for x in coca_is_none if x[1]]
_lowercase : Any = ', '.join(_lowerCAmelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
F"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowercase : Tuple = self.get_image_description(_lowerCAmelCase )
if style_prompt is None:
if len(_lowerCAmelCase ):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
_lowercase : List[str] = self.get_image_description(_lowerCAmelCase )
# get prompt text embeddings for content and style
_lowercase : int = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='pt' , )
_lowercase : List[str] = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_lowercase : Tuple = self.tokenizer(
_lowerCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_lowerCAmelCase , return_tensors='pt' , )
_lowercase : Tuple = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_lowercase : int = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
_lowercase : Optional[int] = text_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# set timesteps
_lowercase : Tuple = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_lowercase : Optional[Any] = {}
if accepts_offset:
_lowercase : int = 1
self.scheduler.set_timesteps(_lowerCAmelCase , **_lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_lowercase , _lowercase : List[str] = self.get_timesteps(_lowerCAmelCase , _lowerCAmelCase , self.device )
_lowercase : str = timesteps[:1].repeat(_lowerCAmelCase )
# Preprocess image
_lowercase : List[Any] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : Tuple = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
_lowercase : List[str] = preprocess(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowercase : List[Any] = self.prepare_latents(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , text_embeddings.dtype , self.device , _lowerCAmelCase )
_lowercase : int = slerp(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if clip_guidance_scale > 0:
_lowercase : int = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : str = self.get_clip_image_embeddings(_lowerCAmelCase , _lowerCAmelCase )
_lowercase : Tuple = slerp(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowercase : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowercase : Optional[int] = content_text_input.input_ids.shape[-1]
_lowercase : Optional[int] = self.tokenizer([''] , padding='max_length' , max_length=_lowerCAmelCase , return_tensors='pt' )
_lowercase : Union[str, Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_lowercase : Tuple = uncond_embeddings.repeat_interleave(_lowerCAmelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowercase : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowercase : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowercase : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowercase : Optional[int] = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device='cpu' , dtype=_lowerCAmelCase ).to(
self.device )
else:
_lowercase : Tuple = torch.randn(_lowerCAmelCase , generator=_lowerCAmelCase , device=self.device , dtype=_lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowercase : Any = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowercase : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowercase : Optional[int] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowercase : str = {}
if accepts_eta:
_lowercase : Union[str, Any] = eta
# check if the scheduler accepts generator
_lowercase : Tuple = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_lowercase : int = generator
with self.progress_bar(total=_lowerCAmelCase ):
for i, t in enumerate(_lowerCAmelCase ):
# expand the latents if we are doing classifier free guidance
_lowercase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowercase : Tuple = self.scheduler.scale_model_input(_lowerCAmelCase , _lowerCAmelCase )
# predict the noise residual
_lowercase : Optional[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowercase , _lowercase : Any = noise_pred.chunk(2 )
_lowercase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowercase : Optional[Any] = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_lowercase , _lowercase : Tuple = self.cond_fn(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , )
# compute the previous noisy sample x_t -> x_t-1
_lowercase : Optional[Any] = self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowercase : Optional[Any] = 1 / 0.1_82_15 * latents
_lowercase : List[Any] = self.vae.decode(_lowerCAmelCase ).sample
_lowercase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 )
_lowercase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowercase : Tuple = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_lowerCAmelCase , nsfw_content_detected=_lowerCAmelCase )
| 66
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Dict = logging.get_logger(__name__)
_UpperCAmelCase : Optional[Any] = {"vocab_file": "sentencepiece.bpe.model"}
_UpperCAmelCase : List[Any] = {
"vocab_file": {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
}
}
_UpperCAmelCase : Union[str, Any] = {
"camembert-base": 512,
}
_UpperCAmelCase : Dict = "▁"
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :int = VOCAB_FILES_NAMES
UpperCamelCase_ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :Dict = ['input_ids', 'attention_mask']
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any="<s>" , SCREAMING_SNAKE_CASE_ : Tuple="</s>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="</s>" , SCREAMING_SNAKE_CASE_ : Optional[int]="<s>" , SCREAMING_SNAKE_CASE_ : List[Any]="<unk>" , SCREAMING_SNAKE_CASE_ : Optional[Any]="<pad>" , SCREAMING_SNAKE_CASE_ : str="<mask>" , SCREAMING_SNAKE_CASE_ : int=["<s>NOTUSED", "</s>NOTUSED"] , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, Any]] = None , **SCREAMING_SNAKE_CASE_ : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase__ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_ ) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
lowerCAmelCase__ = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3}
lowerCAmelCase__ = len(self.fairseq_tokens_to_ids )
lowerCAmelCase__ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
lowerCAmelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] = None ):
lowerCAmelCase__ = [self.sep_token_id]
lowerCAmelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __snake_case ( self : List[Any] ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __snake_case ( self : int ):
lowerCAmelCase__ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : str ):
return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_ )
def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : Dict ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = []
lowerCAmelCase__ = ''''''
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_ )
return out_string.strip()
def __getstate__( self : Optional[Any] ):
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : str , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE_ )
return (out_vocab_file,)
| 668
| 0
|
'''simple docstring'''
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
import torch
snake_case__ : int = True
except ImportError:
snake_case__ : Union[str, Any] = False
try:
from torch.hub import _get_torch_home
snake_case__ : Tuple = _get_torch_home()
except ImportError:
snake_case__ : int = os.path.expanduser(
os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
)
snake_case__ : Any = os.path.join(torch_cache_home, '''transformers''')
snake_case__ : str = '''https://cdn.huggingface.co'''
snake_case__ : int = '''https://s3.amazonaws.com/models.huggingface.co/bert'''
snake_case__ : str = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1])
snake_case__ : Tuple = os.path.join(PATH, '''config.yaml''')
snake_case__ : List[str] = os.path.join(PATH, '''attributes.txt''')
snake_case__ : Optional[Any] = os.path.join(PATH, '''objects.txt''')
snake_case__ : int = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
snake_case__ : int = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
snake_case__ : List[Any] = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
snake_case__ : Optional[Any] = '''pytorch_model.bin'''
snake_case__ : List[str] = '''config.yaml'''
def _lowerCamelCase ( lowerCamelCase_ : Union[str, Any]=OBJECTS , lowerCamelCase_ : Tuple=ATTRIBUTES ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = []
with open(lowerCamelCase_ ) as f:
for object in f.readlines():
vg_classes.append(object.split(',' )[0].lower().strip() )
UpperCAmelCase_ : List[Any] = []
with open(lowerCamelCase_ ) as f:
for object in f.readlines():
vg_attrs.append(object.split(',' )[0].lower().strip() )
return vg_classes, vg_attrs
def _lowerCamelCase ( lowerCamelCase_ : int ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = OrderedDict()
with open(lowerCamelCase_ , 'rb' ) as f:
UpperCAmelCase_ : List[str] = pkl.load(lowerCamelCase_ )['model']
for k in copy.deepcopy(list(ckp.keys() ) ):
UpperCAmelCase_ : Union[str, Any] = ckp.pop(lowerCamelCase_ )
if isinstance(lowerCamelCase_ , np.ndarray ):
UpperCAmelCase_ : int = torch.tensor(lowerCamelCase_ )
else:
assert isinstance(lowerCamelCase_ , torch.tensor ), type(lowerCamelCase_ )
UpperCAmelCase_ : Dict = v
return r
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
lowerCamelCase_ :Dict = {}
def __init__( self , snake_case_ , snake_case_ = "root" , snake_case_=0 ):
'''simple docstring'''
UpperCAmelCase_ : str = name
UpperCAmelCase_ : Tuple = level
UpperCAmelCase_ : Optional[Any] = {}
for k, v in dictionary.items():
if v is None:
raise ValueError()
UpperCAmelCase_ : List[str] = copy.deepcopy(snake_case_ )
UpperCAmelCase_ : Tuple = copy.deepcopy(snake_case_ )
if isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ : Tuple = Config(snake_case_ , name=snake_case_ , level=level + 1 )
UpperCAmelCase_ : int = v
setattr(self , snake_case_ , snake_case_ )
UpperCAmelCase_ : int = d
def __repr__( self ):
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : str = val
UpperCAmelCase_ : List[Any] = val
UpperCAmelCase_ : Optional[int] = key.split('.' )
UpperCAmelCase_ : Optional[int] = len(snake_case_ ) - 1
UpperCAmelCase_ : Optional[Any] = self._pointer
if len(snake_case_ ) > 1:
for i, l in enumerate(snake_case_ ):
if hasattr(self , snake_case_ ) and isinstance(getattr(self , snake_case_ ) , snake_case_ ):
setattr(getattr(self , snake_case_ ) , '.'.join(levels[i:] ) , snake_case_ )
if l == last_level:
UpperCAmelCase_ : List[str] = val
else:
UpperCAmelCase_ : Any = pointer[l]
def _UpperCamelCase ( self ):
'''simple docstring'''
return self._pointer
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
with open(F'''{file_name}''' , 'w' ) as stream:
dump(snake_case_ , snake_case_ )
def _UpperCamelCase ( self , snake_case_ , snake_case_ ):
'''simple docstring'''
with open(F'''{file_name}''' , 'w' ) as stream:
json.dump(snake_case_ , snake_case_ )
@staticmethod
def _UpperCamelCase ( snake_case_ ):
'''simple docstring'''
with open(snake_case_ ) as stream:
UpperCAmelCase_ : int = load(snake_case_ , Loader=snake_case_ )
return data
def __str__( self ):
'''simple docstring'''
UpperCAmelCase_ : str = ' '
if self._name != "root":
UpperCAmelCase_ : Dict = F'''{t * (self._level-1)}{self._name}:\n'''
else:
UpperCAmelCase_ : Union[str, Any] = ''
UpperCAmelCase_ : Union[str, Any] = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(snake_case_ , snake_case_ ):
r += F'''{t * (self._level)}{v}\n'''
self._level += 1
else:
r += F'''{t * (self._level)}{k}: {v} ({type(snake_case_ ).__name__})\n'''
UpperCAmelCase_ : Any = level
return r[:-1]
@classmethod
def _UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = cls.get_config_dict(snake_case_ , **snake_case_ )
return cls(snake_case_ )
@classmethod
def _UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Any = kwargs.pop('cache_dir' , snake_case_ )
UpperCAmelCase_ : Optional[int] = kwargs.pop('force_download' , snake_case_ )
UpperCAmelCase_ : Optional[int] = kwargs.pop('resume_download' , snake_case_ )
UpperCAmelCase_ : List[str] = kwargs.pop('proxies' , snake_case_ )
UpperCAmelCase_ : Any = kwargs.pop('local_files_only' , snake_case_ )
if os.path.isdir(snake_case_ ):
UpperCAmelCase_ : Union[str, Any] = os.path.join(snake_case_ , snake_case_ )
elif os.path.isfile(snake_case_ ) or is_remote_url(snake_case_ ):
UpperCAmelCase_ : Optional[int] = pretrained_model_name_or_path
else:
UpperCAmelCase_ : Union[str, Any] = hf_bucket_url(snake_case_ , filename=snake_case_ , use_cdn=snake_case_ )
try:
# Load from URL or cache if already cached
UpperCAmelCase_ : Optional[Any] = cached_path(
snake_case_ , cache_dir=snake_case_ , force_download=snake_case_ , proxies=snake_case_ , resume_download=snake_case_ , local_files_only=snake_case_ , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
UpperCAmelCase_ : Optional[int] = Config.load_yaml(snake_case_ )
except EnvironmentError:
UpperCAmelCase_ : Optional[Any] = 'Can\'t load config for'
raise EnvironmentError(snake_case_ )
if resolved_config_file == config_file:
print('loading configuration file from path' )
else:
print('loading configuration file cache' )
return Config.load_yaml(snake_case_ ), kwargs
def _lowerCamelCase ( lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
UpperCAmelCase_ : Dict = torch.load('dump.pt' , map_location=in_tensor.device )
UpperCAmelCase_ : Optional[Any] = in_tensor.numpy()
UpperCAmelCase_ : Optional[Any] = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowerCamelCase_ , lowerCamelCase_ , rtol=0.01 , atol=0.1 ), (
F'''{sum([1 for x in np.isclose(lowerCamelCase_ , lowerCamelCase_ , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %'''
" element-wise mismatch"
)
raise Exception('tensors are all good' )
# Hugging face functions below
def _lowerCamelCase ( lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = urlparse(lowerCamelCase_ )
return parsed.scheme in ("http", "https")
def _lowerCamelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str , lowerCamelCase_ : int=True ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
UpperCAmelCase_ : Optional[int] = '/' not in model_id
if legacy_format:
return F'''{endpoint}/{model_id}-{filename}'''
else:
return F'''{endpoint}/{model_id}/{filename}'''
def _lowerCamelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int=None , lowerCamelCase_ : Dict=0 , lowerCamelCase_ : List[Any]=None , ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = 'python/{}'.format(sys.version.split()[0] )
if _torch_available:
ua += "; torch/{}".format(torch.__version__ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
ua += "; " + "; ".join('{}/{}'.format(lowerCamelCase_ , lowerCamelCase_ ) for k, v in user_agent.items() )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
ua += "; " + user_agent
UpperCAmelCase_ : int = {'user-agent': ua}
if resume_size > 0:
UpperCAmelCase_ : Optional[int] = 'bytes=%d-' % (resume_size,)
UpperCAmelCase_ : Optional[Any] = requests.get(lowerCamelCase_ , stream=lowerCamelCase_ , proxies=lowerCamelCase_ , headers=lowerCamelCase_ )
if response.status_code == 416: # Range not satisfiable
return
UpperCAmelCase_ : List[str] = response.headers.get('Content-Length' )
UpperCAmelCase_ : Optional[Any] = resume_size + int(lowerCamelCase_ ) if content_length is not None else None
UpperCAmelCase_ : Optional[int] = tqdm(
unit='B' , unit_scale=lowerCamelCase_ , total=lowerCamelCase_ , initial=lowerCamelCase_ , desc='Downloading' , )
for chunk in response.iter_content(chunk_size=1024 ):
if chunk: # filter out keep-alive new chunks
progress.update(len(lowerCamelCase_ ) )
temp_file.write(lowerCamelCase_ )
progress.close()
def _lowerCamelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Any=False , lowerCamelCase_ : str=None , lowerCamelCase_ : Optional[Any]=10 , lowerCamelCase_ : int=False , lowerCamelCase_ : List[Any]=None , lowerCamelCase_ : str=False , ):
"""simple docstring"""
if cache_dir is None:
UpperCAmelCase_ : List[str] = TRANSFORMERS_CACHE
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase_ : int = str(lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
UpperCAmelCase_ : str = None
if not local_files_only:
try:
UpperCAmelCase_ : List[str] = requests.head(lowerCamelCase_ , allow_redirects=lowerCamelCase_ , proxies=lowerCamelCase_ , timeout=lowerCamelCase_ )
if response.status_code == 200:
UpperCAmelCase_ : List[str] = response.headers.get('ETag' )
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
UpperCAmelCase_ : Tuple = url_to_filename(lowerCamelCase_ , lowerCamelCase_ )
# get cache path to put the file
UpperCAmelCase_ : Tuple = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(lowerCamelCase_ ):
return cache_path
else:
UpperCAmelCase_ : Dict = [
file
for file in fnmatch.filter(os.listdir(lowerCamelCase_ ) , filename + '.*' )
if not file.endswith('.json' ) and not file.endswith('.lock' )
]
if len(lowerCamelCase_ ) > 0:
return os.path.join(lowerCamelCase_ , matching_files[-1] )
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
'Cannot find the requested files in the cached path and outgoing traffic has been'
' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
' to False.' )
return None
# From now on, etag is not None.
if os.path.exists(lowerCamelCase_ ) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
UpperCAmelCase_ : Optional[int] = cache_path + '.lock'
with FileLock(lowerCamelCase_ ):
# If the download just completed while the lock was activated.
if os.path.exists(lowerCamelCase_ ) and not force_download:
# Even if returning early like here, the lock will be released.
return cache_path
if resume_download:
UpperCAmelCase_ : str = cache_path + '.incomplete'
@contextmanager
def _resumable_file_manager():
with open(lowerCamelCase_ , 'a+b' ) as f:
yield f
UpperCAmelCase_ : Tuple = _resumable_file_manager
if os.path.exists(lowerCamelCase_ ):
UpperCAmelCase_ : Optional[Any] = os.stat(lowerCamelCase_ ).st_size
else:
UpperCAmelCase_ : Optional[Any] = 0
else:
UpperCAmelCase_ : str = partial(tempfile.NamedTemporaryFile , dir=lowerCamelCase_ , delete=lowerCamelCase_ )
UpperCAmelCase_ : str = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
print(
'%s not found in cache or force_download set to True, downloading to %s' , lowerCamelCase_ , temp_file.name , )
http_get(
lowerCamelCase_ , lowerCamelCase_ , proxies=lowerCamelCase_ , resume_size=lowerCamelCase_ , user_agent=lowerCamelCase_ , )
os.replace(temp_file.name , lowerCamelCase_ )
UpperCAmelCase_ : int = {'url': url, 'etag': etag}
UpperCAmelCase_ : Dict = cache_path + '.json'
with open(lowerCamelCase_ , 'w' ) as meta_file:
json.dump(lowerCamelCase_ , lowerCamelCase_ )
return cache_path
def _lowerCamelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int]=None ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = url.encode('utf-8' )
UpperCAmelCase_ : Optional[int] = shaaaa(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = url_hash.hexdigest()
if etag:
UpperCAmelCase_ : int = etag.encode('utf-8' )
UpperCAmelCase_ : List[str] = shaaaa(lowerCamelCase_ )
filename += "." + etag_hash.hexdigest()
if url.endswith('.h5' ):
filename += ".h5"
return filename
def _lowerCamelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : List[Any]=False , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : List[str]=None , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Tuple=False , ):
"""simple docstring"""
if cache_dir is None:
UpperCAmelCase_ : Optional[int] = TRANSFORMERS_CACHE
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase_ : Optional[int] = str(lowerCamelCase_ )
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCAmelCase_ : Any = str(lowerCamelCase_ )
if is_remote_url(lowerCamelCase_ ):
# URL, so get it from the cache (downloading if necessary)
UpperCAmelCase_ : Tuple = get_from_cache(
lowerCamelCase_ , cache_dir=lowerCamelCase_ , force_download=lowerCamelCase_ , proxies=lowerCamelCase_ , resume_download=lowerCamelCase_ , user_agent=lowerCamelCase_ , local_files_only=lowerCamelCase_ , )
elif os.path.exists(lowerCamelCase_ ):
# File, and it exists.
UpperCAmelCase_ : Union[str, Any] = url_or_filename
elif urlparse(lowerCamelCase_ ).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError('file {} not found'.format(lowerCamelCase_ ) )
else:
# Something unknown
raise ValueError('unable to parse {} as a URL or as a local path'.format(lowerCamelCase_ ) )
if extract_compressed_file:
if not is_zipfile(lowerCamelCase_ ) and not tarfile.is_tarfile(lowerCamelCase_ ):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
UpperCAmelCase_ : List[Any] = os.path.split(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = output_file.replace('.' , '-' ) + '-extracted'
UpperCAmelCase_ : Any = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isdir(lowerCamelCase_ ) and os.listdir(lowerCamelCase_ ) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
UpperCAmelCase_ : int = output_path + '.lock'
with FileLock(lowerCamelCase_ ):
shutil.rmtree(lowerCamelCase_ , ignore_errors=lowerCamelCase_ )
os.makedirs(lowerCamelCase_ )
if is_zipfile(lowerCamelCase_ ):
with ZipFile(lowerCamelCase_ , 'r' ) as zip_file:
zip_file.extractall(lowerCamelCase_ )
zip_file.close()
elif tarfile.is_tarfile(lowerCamelCase_ ):
UpperCAmelCase_ : str = tarfile.open(lowerCamelCase_ )
tar_file.extractall(lowerCamelCase_ )
tar_file.close()
else:
raise EnvironmentError('Archive format of {} could not be identified'.format(lowerCamelCase_ ) )
return output_path_extracted
return output_path
def _lowerCamelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : List[str]="," ):
"""simple docstring"""
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
with open(lowerCamelCase_ ) as f:
UpperCAmelCase_ : int = eval(f.read() )
else:
UpperCAmelCase_ : Union[str, Any] = requests.get(lowerCamelCase_ )
try:
UpperCAmelCase_ : List[Any] = requests.json()
except Exception:
UpperCAmelCase_ : List[str] = req.content.decode()
assert data is not None, "could not connect"
try:
UpperCAmelCase_ : Tuple = eval(lowerCamelCase_ )
except Exception:
UpperCAmelCase_ : Dict = data.split('\n' )
req.close()
return data
def _lowerCamelCase ( lowerCamelCase_ : List[str] ):
"""simple docstring"""
UpperCAmelCase_ : int = requests.get(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def _lowerCamelCase ( lowerCamelCase_ : Dict ):
"""simple docstring"""
UpperCAmelCase_ : int = url.split('/' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCamelCase_ )
with open(lowerCamelCase_ , 'rb' ) as stream:
UpperCAmelCase_ : Optional[int] = pkl.load(lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = weights.pop('model' )
UpperCAmelCase_ : Tuple = {}
for k, v in model.items():
UpperCAmelCase_ : str = torch.from_numpy(lowerCamelCase_ )
if "running_var" in k:
UpperCAmelCase_ : int = torch.tensor([0] )
UpperCAmelCase_ : Optional[Any] = k.replace('running_var' , 'num_batches_tracked' )
UpperCAmelCase_ : Optional[int] = zero
return new
def _lowerCamelCase ( ):
"""simple docstring"""
print(F'''{os.path.abspath(os.path.join(lowerCamelCase_ , os.pardir ) )}/demo.ipynb''' )
def _lowerCamelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Any="RGB" ):
"""simple docstring"""
assert isinstance(lowerCamelCase_ , lowerCamelCase_ )
if os.path.isfile(lowerCamelCase_ ):
UpperCAmelCase_ : int = cva.imread(lowerCamelCase_ )
else:
UpperCAmelCase_ : Union[str, Any] = get_image_from_url(lowerCamelCase_ )
assert img is not None, F'''could not connect to: {im}'''
UpperCAmelCase_ : Dict = cva.cvtColor(lowerCamelCase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
UpperCAmelCase_ : str = img[:, :, ::-1]
return img
def _lowerCamelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Tuple=1 ):
"""simple docstring"""
return (images[i : i + batch] for i in range(0 , len(lowerCamelCase_ ) , lowerCamelCase_ ))
| 703
|
'''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : str , lowerCamelCase_ : int ):
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(lowerCamelCase_ ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 389
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=18 , SCREAMING_SNAKE_CASE__=30 , SCREAMING_SNAKE_CASE__=400 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE__=[0.5, 0.5, 0.5] , ) -> int:
A__ = size if size is not None else {"shortest_edge": 18}
A__ = crop_size if crop_size is not None else {"height": 18, "width": 18}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_center_crop
A__ = crop_size
A__ = do_normalize
A__ = image_mean
A__ = image_std
def snake_case__ ( self ) -> int:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
A__ : Tuple = LevitImageProcessor if is_vision_available() else None
def snake_case__ ( self ) -> Any:
A__ = LevitImageProcessingTester(self )
@property
def snake_case__ ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case__ ( self ) -> List[Any]:
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , "image_mean" ) )
self.assertTrue(hasattr(snake_case_ , "image_std" ) )
self.assertTrue(hasattr(snake_case_ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
self.assertTrue(hasattr(snake_case_ , "do_center_crop" ) )
self.assertTrue(hasattr(snake_case_ , "size" ) )
def snake_case__ ( self ) -> Dict:
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def snake_case__ ( self ) -> List[str]:
pass
def snake_case__ ( self ) -> Optional[int]:
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case__ ( self ) -> Union[str, Any]:
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case__ ( self ) -> Any:
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
A__ = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 104
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
SCREAMING_SNAKE_CASE_ = datasets.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
SCREAMING_SNAKE_CASE_ = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
SCREAMING_SNAKE_CASE_ = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase="dummy_doc" ):
__lowerCAmelCase = {doc: key_lines}
__lowerCAmelCase = {doc: sys_lines}
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(_lowerCAmelCase , key_doc_lines[doc] , _lowerCAmelCase )
key_singletons_num += singletons_num
if NP_only or min_span:
__lowerCAmelCase = reader.set_annotated_parse_trees(_lowerCAmelCase , key_doc_lines[doc] , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase , __lowerCAmelCase = reader.get_doc_mentions(_lowerCAmelCase , sys_doc_lines[doc] , _lowerCAmelCase )
sys_singletons_num += singletons_num
if NP_only or min_span:
__lowerCAmelCase = reader.set_annotated_parse_trees(_lowerCAmelCase , key_doc_lines[doc] , _lowerCAmelCase , _lowerCAmelCase )
if remove_nested:
__lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(_lowerCAmelCase , _lowerCAmelCase )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
__lowerCAmelCase , __lowerCAmelCase = reader.remove_nested_coref_mentions(_lowerCAmelCase , _lowerCAmelCase )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
__lowerCAmelCase = reader.get_mention_assignments(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = reader.get_mention_assignments(_lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"""Number of resulting singleton clusters in the key """
f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"""files, respectively""" )
return doc_coref_infos
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
__lowerCAmelCase = get_coref_infos(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
__lowerCAmelCase = {}
__lowerCAmelCase = 0
__lowerCAmelCase = 0
for name, metric in metrics:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = evaluator.evaluate_documents(_lowerCAmelCase , _lowerCAmelCase , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": fa} )
logger.info(
name.ljust(10 ) , f"""Recall: {recall * 100:.2f}""" , f""" Precision: {precision * 100:.2f}""" , f""" F1: {fa * 100:.2f}""" , )
if conll_subparts_num == 3:
__lowerCAmelCase = (conll / 3) * 100
logger.info(f"""CoNLL score: {conll:.2f}""" )
output_scores.update({"""conll_score""": conll} )
return output_scores
def lowercase (_lowerCAmelCase ):
__lowerCAmelCase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
__lowerCAmelCase = line.split()[5]
if not parse_col == "-":
__lowerCAmelCase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def A__ ( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False ) -> str:
__lowerCAmelCase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
__lowerCAmelCase = util.check_gold_parse_annotation(snake_case_ )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
__lowerCAmelCase = evaluate(
key_lines=snake_case_ , sys_lines=snake_case_ , metrics=snake_case_ , NP_only=snake_case_ , remove_nested=snake_case_ , keep_singletons=snake_case_ , min_span=snake_case_ , )
return score
| 465
| 0
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowerCAmelCase = logging.get_logger(__name__)
# General docstring
_lowerCAmelCase = "RegNetConfig"
# Base docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
_lowerCAmelCase = "facebook/regnet-y-040"
_lowerCAmelCase = "tabby, tabby cat"
_lowerCAmelCase = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 3 , _A : int = 1 , _A : int = 1 , _A : Optional[str] = "relu" , **_A : Any , ):
super().__init__(**_A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
_UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=_A , strides=_A , padding='''VALID''' , groups=_A , use_bias=_A , name='''convolution''' , )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
_UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : Any , _A : Any ):
_UpperCamelCase = self.convolution(self.padding(_A ) )
_UpperCamelCase = self.normalization(_A )
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Optional[Any] , _A : RegNetConfig , **_A : Any ):
super().__init__(**_A )
_UpperCamelCase = config.num_channels
_UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='''embedder''' , )
def UpperCamelCase_ ( self : List[str] , _A : Optional[int] ):
_UpperCamelCase = shape_list(_A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
_UpperCamelCase = tf.transpose(_A , perm=(0, 2, 3, 1) )
_UpperCamelCase = self.embedder(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : str , _A : int , _A : int = 2 , **_A : Optional[Any] ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.ConvaD(
filters=_A , kernel_size=1 , strides=_A , use_bias=_A , name='''convolution''' )
_UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='''normalization''' )
def UpperCamelCase_ ( self : str , _A : tf.Tensor , _A : bool = False ):
return self.normalization(self.convolution(_A ) , training=_A )
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Dict , _A : int , _A : int , **_A : Dict ):
super().__init__(**_A )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
_UpperCamelCase = [
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''relu''' , name='''attention.0''' ),
tf.keras.layers.ConvaD(filters=_A , kernel_size=1 , activation='''sigmoid''' , name='''attention.2''' ),
]
def UpperCamelCase_ ( self : List[str] , _A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
_UpperCamelCase = self.pooler(_A )
for layer_module in self.attention:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = hidden_state * pooled
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : str ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.2''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Dict , _A : Tuple ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , _A : int , _A : int , _A : int = 1 , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = max(1 , out_channels // config.groups_width )
_UpperCamelCase = (
TFRegNetShortCut(_A , stride=_A , name='''shortcut''' )
if should_apply_shortcut
else tf.keras.layers.Activation('''linear''' , name='''shortcut''' )
)
_UpperCamelCase = [
TFRegNetConvLayer(_A , kernel_size=1 , activation=config.hidden_act , name='''layer.0''' ),
TFRegNetConvLayer(
_A , stride=_A , groups=_A , activation=config.hidden_act , name='''layer.1''' ),
TFRegNetSELayer(_A , reduced_channels=int(round(in_channels / 4 ) ) , name='''layer.2''' ),
TFRegNetConvLayer(_A , kernel_size=1 , activation=_A , name='''layer.3''' ),
]
_UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Tuple , _A : List[Any] ):
_UpperCamelCase = hidden_state
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
_UpperCamelCase = self.shortcut(_A )
hidden_state += residual
_UpperCamelCase = self.activation(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : Tuple , _A : RegNetConfig , _A : int , _A : int , _A : int = 2 , _A : int = 2 , **_A : Union[str, Any] ):
super().__init__(**_A )
_UpperCamelCase = TFRegNetXLayer if config.layer_type == '''x''' else TFRegNetYLayer
_UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(_A , _A , _A , stride=_A , name='''layers.0''' ),
*[layer(_A , _A , _A , name=F"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Union[str, Any] , _A : Optional[int] ):
for layer_module in self.layers:
_UpperCamelCase = layer_module(_A )
return hidden_state
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , _A : RegNetConfig , **_A : List[str] ):
super().__init__(**_A )
_UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
_A , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='''stages.0''' , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(_A , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(_A , _A , _A , depth=_A , name=F"""stages.{i+1}""" ) )
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : bool = False , _A : bool = True ):
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(_A )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=_A , hidden_states=_A )
@keras_serializable
class lowerCAmelCase_ ( tf.keras.layers.Layer ):
UpperCAmelCase = RegNetConfig
def __init__( self : int , _A : Tuple , **_A : int ):
super().__init__(**_A )
_UpperCamelCase = config
_UpperCamelCase = TFRegNetEmbeddings(_A , name='''embedder''' )
_UpperCamelCase = TFRegNetEncoder(_A , name='''encoder''' )
_UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=_A , name='''pooler''' )
@unpack_inputs
def UpperCamelCase_ ( self : Optional[int] , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : bool = False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(_A , training=_A )
_UpperCamelCase = self.encoder(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(_A )
# Change to NCHW output format have uniformity in the modules
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
_UpperCamelCase = tf.transpose(_A , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
_UpperCamelCase = tuple([tf.transpose(_A , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_A , pooler_output=_A , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = RegNetConfig
UpperCAmelCase = "regnet"
UpperCAmelCase = "pixel_values"
@property
def UpperCamelCase_ ( self : Tuple ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
_lowerCAmelCase = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowerCAmelCase = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.", __lowercase, )
class lowerCAmelCase_ ( __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Optional[int] , **_A : Tuple ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_A , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase_ ( self : Any , _A : tf.Tensor , _A : Optional[bool] = None , _A : Optional[bool] = None , _A : Optional[int]=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
pixel_values=_A , output_hidden_states=_A , return_dict=_A , training=_A , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ", __lowercase, )
class lowerCAmelCase_ ( __lowercase, __lowercase ):
def __init__( self : List[Any] , _A : RegNetConfig , *_A : Any , **_A : int ):
super().__init__(_A , *_A , **_A )
_UpperCamelCase = config.num_labels
_UpperCamelCase = TFRegNetMainLayer(_A , name='''regnet''' )
# classification head
_UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='''classifier.1''' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(_A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_A , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase_ ( self : str , _A : tf.Tensor = None , _A : tf.Tensor = None , _A : bool = None , _A : bool = None , _A : Any=False , ):
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.regnet(
_A , output_hidden_states=_A , return_dict=_A , training=_A )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier[0](_A )
_UpperCamelCase = self.classifier[1](_A )
_UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=_A , logits=_A )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=_A , logits=_A , hidden_states=outputs.hidden_states )
| 701
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_lowerCAmelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_lowerCAmelCase = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , '''schedulers/''' ) )
_UpperCamelCase = self.diffusers_dir
shutil.copy(
os.path.join(_A , '''src/diffusers/schedulers/scheduling_ddpm.py''' ) , os.path.join(self.diffusers_dir , '''schedulers/scheduling_ddpm.py''' ) , )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''src/diffusers'''
shutil.rmtree(self.diffusers_dir )
def UpperCamelCase_ ( self : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : Dict , _A : List[str]=None ):
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCamelCase = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCamelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
_UpperCamelCase = black.format_str(_A , mode=_A )
_UpperCamelCase = os.path.join(self.diffusers_dir , '''new_code.py''' )
with open(_A , '''w''' , newline='''\n''' ) as f:
f.write(_A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(_A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=_A )
with open(_A , '''r''' ) as f:
self.assertTrue(f.read() , _A )
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(_A , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
# Base copy consistency
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , REFERENCE_CODE + '''\n''' , )
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' , '''DDPMSchedulerOutput''' , _A , )
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , re.sub('''DDPM''' , '''Test''' , _A ) , )
# Copy consistency with a really long name
_UpperCamelCase = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub('''Bert''' , _A , _A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' , '''TestSchedulerOutput''' , _A , overwrite_result=re.sub('''DDPM''' , '''Test''' , _A ) , )
| 71
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=30 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.02 , _lowerCAmelCase=None , _lowerCAmelCase=2 , ) -> int:
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_size
_lowerCAmelCase = num_channels
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = type_sequence_label_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scope
_lowerCAmelCase = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase = (image_size // patch_size) ** 2
_lowerCAmelCase = num_patches + 1
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
_lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self ) -> Any:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[Any]:
_lowerCAmelCase = ViTModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Tuple:
_lowerCAmelCase = ViTForMaskedImageModeling(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = ViTForMaskedImageModeling(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Any:
_lowerCAmelCase = self.type_sequence_label_size
_lowerCAmelCase = ViTForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = model(_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase = 1
_lowerCAmelCase = ViTForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
_lowerCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase = model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) = config_and_inputs
_lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( __magic_name__ ,__magic_name__ ,unittest.TestCase ):
__lowerCamelCase : List[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__lowerCamelCase : Tuple = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : str = False
__lowerCamelCase : List[Any] = False
__lowerCamelCase : int = False
def _snake_case ( self ) -> List[str]:
_lowerCAmelCase = ViTModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def _snake_case ( self ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _snake_case ( self ) -> Any:
pass
def _snake_case ( self ) -> Any:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_lowerCAmelCase )
_lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def _snake_case ( self ) -> int:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowerCAmelCase )
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def _snake_case ( self ) -> Tuple:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = ViTModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def __a():
'''simple docstring'''
_lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self ) -> Any:
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def _snake_case ( self ) -> Any:
_lowerCAmelCase = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ).to(_lowerCAmelCase )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(**_lowerCAmelCase )
# verify the logits
_lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
def _snake_case ( self ) -> List[Any]:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
_lowerCAmelCase = ViTModel.from_pretrained("facebook/dino-vits8" ).to(_lowerCAmelCase )
_lowerCAmelCase = ViTImageProcessor.from_pretrained("facebook/dino-vits8" , size=480 )
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
_lowerCAmelCase = inputs.pixel_values.to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase , interpolate_pos_encoding=_lowerCAmelCase )
# verify the logits
_lowerCAmelCase = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , _lowerCAmelCase )
_lowerCAmelCase = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _lowerCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _snake_case ( self ) -> int:
_lowerCAmelCase = ViTModel.from_pretrained("facebook/dino-vits8" , torch_dtype=torch.floataa , device_map="auto" )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_lowerCAmelCase , return_tensors="pt" )
_lowerCAmelCase = inputs.pixel_values.to(_lowerCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_lowerCAmelCase = model(_lowerCAmelCase )
| 18
|
from __future__ import annotations
_lowercase : Optional[int] =1.6021E-19 # units = C
def lowerCAmelCase_ ( _lowercase : float , _lowercase : float , _lowercase : float , ) -> tuple[str, float]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("""You cannot supply more or less than 2 values""")
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""")
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""")
elif mobility < 0:
raise ValueError("""mobility cannot be negative""")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136
| 0
|
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( lowercase ):
if not nums:
raise ValueError("""List is empty""" )
return sum(lowercase ) / len(lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36
|
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger("""transformers.models.speecht5""")
def __magic_name__ ( lowercase , lowercase , lowercase ):
hf_model.apply_weight_norm()
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""input_conv.weight_g"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''upsamples.{i}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[str] =checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: Union[str, Any] =checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Dict =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
SCREAMING_SNAKE_CASE_: Any =checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
SCREAMING_SNAKE_CASE_: List[Any] =checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
SCREAMING_SNAKE_CASE_: Tuple =checkpoint["""output_conv.1.weight_g"""]
SCREAMING_SNAKE_CASE_: List[str] =checkpoint["""output_conv.1.weight_v"""]
SCREAMING_SNAKE_CASE_: Optional[int] =checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None , ):
if config_path is not None:
SCREAMING_SNAKE_CASE_: List[Any] =SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
SCREAMING_SNAKE_CASE_: Optional[Any] =SpeechTaHifiGanConfig()
SCREAMING_SNAKE_CASE_: Union[str, Any] =SpeechTaHifiGan(lowercase )
SCREAMING_SNAKE_CASE_: Any =torch.load(lowercase )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: List[Any] =np.load(lowercase )
SCREAMING_SNAKE_CASE_: Any =stats[0].reshape(-1 )
SCREAMING_SNAKE_CASE_: str =stats[1].reshape(-1 )
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
SCREAMING_SNAKE_CASE_: Dict =torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
_UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 36
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case :int =None
__snake_case :Optional[Any] =logging.get_logger(__name__)
__snake_case :Tuple ={'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
__snake_case :Any ={
'vocab_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
},
'tokenizer_file': {
'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
},
}
__snake_case :Union[str, Any] ={
'google/rembert': 256,
}
__snake_case :Union[str, Any] ='▁'
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : str = VOCAB_FILES_NAMES
A_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
A_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Any = RemBertTokenizer
def __init__( self : int , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[str]=False , __UpperCamelCase : int="[CLS]" , __UpperCamelCase : List[str]="[SEP]" , __UpperCamelCase : Union[str, Any]="<unk>" , __UpperCamelCase : List[str]="[SEP]" , __UpperCamelCase : Optional[Any]="<pad>" , __UpperCamelCase : Optional[Any]="[CLS]" , __UpperCamelCase : Any="[MASK]" , **__UpperCamelCase : int , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
A = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1]
def __UpperCamelCase ( self : Tuple , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCamelCase ( self : Dict , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCamelCase ):
logger.error('Vocabulary path ({}) should be a directory'.format(__UpperCamelCase ) )
return
A = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,)
| 106
|
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__snake_case :Any =None
try:
import msvcrt
except ImportError:
__snake_case :Union[str, Any] =None
try:
import fcntl
except ImportError:
__snake_case :str =None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__snake_case :str =OSError
# Data
# ------------------------------------------------
__snake_case :Any =[
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__snake_case :str ='3.0.12'
__snake_case :str =None
def lowerCamelCase_ ( ) -> List[str]:
'''simple docstring'''
global _logger
A = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase__ ( _lowerCamelCase ):
def __init__( self : Tuple , __UpperCamelCase : Union[str, Any] ) -> List[Any]:
A = lock_file
return None
def __str__( self : List[Any] ) -> int:
A = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class lowerCAmelCase__ :
def __init__( self : int , __UpperCamelCase : Union[str, Any] ) -> List[str]:
A = lock
return None
def __enter__( self : Dict ) -> Dict:
return self.lock
def __exit__( self : int , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any ) -> Optional[int]:
self.lock.release()
return None
class lowerCAmelCase__ :
def __init__( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any]=-1 , __UpperCamelCase : Optional[Any]=None ) -> Dict:
A = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
A = self.hash_filename_if_too_long(__UpperCamelCase , __UpperCamelCase )
# The path to the lock file.
A = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
A = None
# The default timeout value.
A = timeout
# We use this lock primarily for the lock counter.
A = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
A = 0
return None
@property
def __UpperCamelCase ( self : str ) -> Union[str, Any]:
return self._lock_file
@property
def __UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
return self._timeout
@timeout.setter
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Any ) -> Tuple:
A = float(__UpperCamelCase )
return None
def __UpperCamelCase ( self : Optional[Any] ) -> Any:
raise NotImplementedError()
def __UpperCamelCase ( self : int ) -> str:
raise NotImplementedError()
@property
def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]:
return self._lock_file_fd is not None
def __UpperCamelCase ( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=0.0_5 ) -> Any:
# Use the default timeout, if no timeout is provided.
if timeout is None:
A = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
A = id(self )
A = self._lock_file
A = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(__UpperCamelCase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
A = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Tuple=False ) -> Tuple:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
A = id(self )
A = self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
A = 0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self : int ) -> Dict:
self.acquire()
return self
def __exit__( self : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ) -> Dict:
self.release()
return None
def __del__( self : Union[str, Any] ) -> Optional[int]:
self.release(force=__UpperCamelCase )
return None
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : int ) -> str:
A = os.path.basename(__UpperCamelCase )
if len(__UpperCamelCase ) > max_length and max_length > 0:
A = os.path.dirname(__UpperCamelCase )
A = str(hash(__UpperCamelCase ) )
A = filename[: max_length - len(__UpperCamelCase ) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(__UpperCamelCase , __UpperCamelCase )
else:
return path
class lowerCAmelCase__ ( _lowerCamelCase ):
def __init__( self : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple=-1 , __UpperCamelCase : Optional[Any]=None ) -> Union[str, Any]:
from .file_utils import relative_to_absolute_path
super().__init__(__UpperCamelCase , timeout=__UpperCamelCase , max_filename_length=__UpperCamelCase )
A = '\\\\?\\' + relative_to_absolute_path(self.lock_file )
def __UpperCamelCase ( self : Any ) -> Any:
A = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
A = os.open(self._lock_file , __UpperCamelCase )
except OSError:
pass
else:
try:
msvcrt.locking(__UpperCamelCase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__UpperCamelCase )
else:
A = fd
return None
def __UpperCamelCase ( self : Optional[Any] ) -> List[Any]:
A = self._lock_file_fd
A = None
msvcrt.locking(__UpperCamelCase , msvcrt.LK_UNLCK , 1 )
os.close(__UpperCamelCase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase__ ( _lowerCamelCase ):
def __init__( self : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any]=-1 , __UpperCamelCase : Dict=None ) -> Dict:
A = os.statvfs(os.path.dirname(__UpperCamelCase ) ).f_namemax
super().__init__(__UpperCamelCase , timeout=__UpperCamelCase , max_filename_length=__UpperCamelCase )
def __UpperCamelCase ( self : Any ) -> int:
A = os.O_RDWR | os.O_CREAT | os.O_TRUNC
A = os.open(self._lock_file , __UpperCamelCase )
try:
fcntl.flock(__UpperCamelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__UpperCamelCase )
else:
A = fd
return None
def __UpperCamelCase ( self : Optional[int] ) -> int:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
A = self._lock_file_fd
A = None
fcntl.flock(__UpperCamelCase , fcntl.LOCK_UN )
os.close(__UpperCamelCase )
return None
class lowerCAmelCase__ ( _lowerCamelCase ):
def __UpperCamelCase ( self : int ) -> Optional[int]:
A = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
A = os.open(self._lock_file , __UpperCamelCase )
except OSError:
pass
else:
A = fd
return None
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
os.close(self._lock_file_fd )
A = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
__snake_case :List[str] =None
if msvcrt:
__snake_case :List[Any] =WindowsFileLock
elif fcntl:
__snake_case :Any =UnixFileLock
else:
__snake_case :Tuple =SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
| 106
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = "▁"
lowercase = {"vocab_file": "sentencepiece.bpe.model"}
lowercase = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
),
}
}
lowercase = {
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
lowercase = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = []
lowerCAmelCase = []
def __init__( self , a , a="<s>" , a="</s>" , a="</s>" , a="<s>" , a="<unk>" , a="<pad>" , a="<mask>" , a=None , a=None , a=None , a = None , a=None , a=False , **a , ) -> int:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
snake_case_ = legacy_behaviour
super().__init__(
bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , tokenizer_file=a , src_lang=a , tgt_lang=a , additional_special_tokens=a , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=a , **a , )
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(a ) )
snake_case_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
snake_case_ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model )
snake_case_ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(a )
}
snake_case_ = {v: k for k, v in self.lang_code_to_id.items()}
snake_case_ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
snake_case_ = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
snake_case_ = src_lang if src_lang is not None else 'eng_Latn'
snake_case_ = self.lang_code_to_id[self._src_lang]
snake_case_ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Tuple:
snake_case_ = self.__dict__.copy()
snake_case_ = None
snake_case_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a ) -> Dict:
snake_case_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _UpperCamelCase ( self ) -> Optional[int]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _UpperCamelCase ( self ) -> str:
return self._src_lang
@src_lang.setter
def _UpperCamelCase ( self , a ) -> None:
snake_case_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _UpperCamelCase ( self , a , a = None , a = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
snake_case_ = [1] * len(self.prefix_tokens )
snake_case_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(a )) + suffix_ones
return prefix_ones + ([0] * len(a )) + ([0] * len(a )) + suffix_ones
def _UpperCamelCase ( self , a , a = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _UpperCamelCase ( self , a , a = None ) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _UpperCamelCase ( self , a , a , a , a , **a ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
snake_case_ = src_lang
snake_case_ = self(a , add_special_tokens=a , return_tensors=a , **a )
snake_case_ = self.convert_tokens_to_ids(a )
snake_case_ = tgt_lang_id
return inputs
def _UpperCamelCase ( self ) -> Any:
snake_case_ = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _UpperCamelCase ( self , a ) -> List[str]:
return self.sp_model.encode(a , out_type=a )
def _UpperCamelCase ( self , a ) -> List[Any]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
snake_case_ = self.sp_model.PieceToId(a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _UpperCamelCase ( self , a ) -> Any:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _UpperCamelCase ( self , a ) -> List[Any]:
snake_case_ = ''.join(a ).replace(a , ' ' ).strip()
return out_string
def _UpperCamelCase ( self , a , a = None ) -> Tuple[str]:
if not os.path.isdir(a ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case_ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a )
elif not os.path.isfile(self.vocab_file ):
with open(a , 'wb' ) as fi:
snake_case_ = self.sp_model.serialized_model_proto()
fi.write(a )
return (out_vocab_file,)
def _UpperCamelCase ( self , a , a = "eng_Latn" , a = None , a = "fra_Latn" , **a , ) -> BatchEncoding:
snake_case_ = src_lang
snake_case_ = tgt_lang
return super().prepare_seqaseq_batch(a , a , **a )
def _UpperCamelCase ( self ) -> Dict:
return self.set_src_lang_special_tokens(self.src_lang )
def _UpperCamelCase ( self ) -> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _UpperCamelCase ( self , a ) -> None:
snake_case_ = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
def _UpperCamelCase ( self , a ) -> None:
snake_case_ = self.lang_code_to_id[lang]
if self.legacy_behaviour:
snake_case_ = []
snake_case_ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case_ = [self.cur_lang_code]
snake_case_ = [self.eos_token_id]
| 607
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=4 , ) -> Optional[int]:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_choices
def _UpperCamelCase ( self ) -> List[Any]:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_attention_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCamelCase ( self ) -> int:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ , snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class UpperCamelCase_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = True
lowerCAmelCase = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCamelCase ( self ) -> Union[str, Any]:
snake_case_ = FlaxRoFormerModelTester(self )
@slow
def _UpperCamelCase ( self ) -> Any:
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained('junnyu/roformer_chinese_small' , from_pt=a )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _UpperCamelCase ( self ) -> str:
snake_case_ = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
snake_case_ = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case_ = model(a )[0]
snake_case_ = 5_00_00
snake_case_ = (1, 6, vocab_size)
self.assertEqual(output.shape , a )
snake_case_ = jnp.array(
[[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , a , atol=1E-4 ) )
| 607
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Optional[int] = {
'configuration_blenderbot': [
'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlenderbotConfig',
'BlenderbotOnnxConfig',
],
'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[str] = [
'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlenderbotForCausalLM',
'BlenderbotForConditionalGeneration',
'BlenderbotModel',
'BlenderbotPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
'TFBlenderbotForConditionalGeneration',
'TFBlenderbotModel',
'TFBlenderbotPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = [
'FlaxBlenderbotForConditionalGeneration',
'FlaxBlenderbotModel',
'FlaxBlenderbotPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 50
|
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A__ : int = threading.Lock()
A__ : Optional[logging.Handler] = None
A__ : str = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
A__ : str = logging.WARNING
A__ : Union[str, Any] = True
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[Any] = os.getenv('''TRANSFORMERS_VERBOSITY''' , _UpperCamelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
f'''has to be one of: { ", ".join(log_levels.keys() ) }''' )
return _default_log_level
def _lowerCAmelCase ( ):
"""simple docstring"""
return __name__.split('''.''' )[0]
def _lowerCAmelCase ( ):
"""simple docstring"""
return logging.getLogger(_get_library_name() )
def _lowerCAmelCase ( ):
"""simple docstring"""
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_lowercase: int = logging.StreamHandler() # Set sys.stderr as stream.
_lowercase: Dict = sys.stderr.flush
# Apply our default configuration to the library root logger.
_lowercase: Dict = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
_lowercase: Optional[Any] = False
def _lowerCAmelCase ( ):
"""simple docstring"""
global _default_handler
with _lock:
if not _default_handler:
return
_lowercase: Tuple = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
_lowercase: Dict = None
def _lowerCAmelCase ( ):
"""simple docstring"""
return log_levels
def _lowerCAmelCase ( _UpperCamelCase = None ):
"""simple docstring"""
if name is None:
_lowercase: Tuple = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
_get_library_root_logger().setLevel(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
return set_verbosity(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(_UpperCamelCase )
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowercase: str = False
def _lowerCAmelCase ( ):
"""simple docstring"""
_configure_library_root_logger()
_lowercase: List[str] = True
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Any = _get_library_root_logger().handlers
for handler in handlers:
_lowercase: int = logging.Formatter('''[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s''' )
handler.setFormatter(_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[str] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(_UpperCamelCase )
def _lowerCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
_lowercase: Any = os.getenv('''TRANSFORMERS_NO_ADVISORY_WARNINGS''' , _UpperCamelCase )
if no_advisory_warnings:
return
self.warning(*_UpperCamelCase , **_UpperCamelCase )
A__ : Optional[int] = warning_advice
@functools.lru_cache(_UpperCamelCase )
def _lowerCAmelCase ( self , *_UpperCamelCase , **_UpperCamelCase ):
"""simple docstring"""
self.warning(*_UpperCamelCase , **_UpperCamelCase )
A__ : List[Any] = warning_once
class __magic_name__ :
def __init__( self , *A_ , **A_ ) -> Any: # pylint: disable=unused-argument
"""simple docstring"""
_lowercase: Tuple = args[0] if args else None
def __iter__( self ) -> Union[str, Any]:
"""simple docstring"""
return iter(self._iterator )
def __getattr__( self , A_ ) -> List[Any]:
"""simple docstring"""
def empty_fn(*A_ , **A_ ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ) -> Tuple:
"""simple docstring"""
return self
def __exit__( self , A_ , A_ , A_ ) -> Optional[Any]:
"""simple docstring"""
return
class __magic_name__ :
def __call__( self , *A_ , **A_ ) -> Dict:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm(*A_ , **A_ )
else:
return EmptyTqdm(*A_ , **A_ )
def lowercase_ ( self , *A_ , **A_ ) -> List[str]:
"""simple docstring"""
_lowercase: Optional[Any] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*A_ , **A_ )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
A__ : str = _tqdm_cls()
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
return bool(_tqdm_active )
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
_lowercase: str = True
hf_hub_utils.enable_progress_bars()
def _lowerCAmelCase ( ):
"""simple docstring"""
global _tqdm_active
_lowercase: Union[str, Any] = False
hf_hub_utils.disable_progress_bars()
| 353
| 0
|
"""simple docstring"""
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a ( A_ , unittest.TestCase ):
A_ : Any = None
A_ : str = BloomTokenizerFast
A_ : List[Any] = BloomTokenizerFast
A_ : Union[str, Any] = True
A_ : Union[str, Any] = False
A_ : Tuple = '''tokenizer_file'''
A_ : Union[str, Any] = {'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def lowerCAmelCase_ ( self : int ) -> Dict:
super().setUp()
__a = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase_ ( self : Optional[int] , **lowerCamelCase_ : Any ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def lowerCAmelCase_ ( self : str ) -> Optional[int]:
__a = self.get_rust_tokenizer()
__a = ["""The quick brown fox</s>""", """jumps over the lazy dog</s>"""]
__a = [[21_75, 2_37_14, 7_31_73, 14_42_52, 2], [77, 13_26_19, 34_78, 3_68, 10_95_86, 3_54_33, 2]]
__a = tokenizer.batch_encode_plus(lowerCamelCase_ )["""input_ids"""]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
__a = tokenizer.batch_decode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase_ ( self : Optional[int] , lowerCamelCase_ : List[str]=6 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__a = self.rust_tokenizer_class.from_pretrained(lowerCamelCase_ , **lowerCamelCase_ )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
__a = """This is a simple input"""
__a = ["""This is a simple input 1""", """This is a simple input 2"""]
__a = ("""This is a simple input""", """This is a pair""")
__a = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
try:
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.encode(lowerCamelCase_ , max_length=lowerCamelCase_ )
tokenizer_r.batch_encode_plus(lowerCamelCase_ , max_length=lowerCamelCase_ )
except ValueError:
self.fail("""Bloom Tokenizer should be able to deal with padding""" )
__a = None # Hotfixing padding = None
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Simple input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(lowerCamelCase_ , tokenizer_r.encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" )
# Pair input
self.assertRaises(
lowerCamelCase_ , tokenizer_r.batch_encode_plus , lowerCamelCase_ , max_length=lowerCamelCase_ , padding="""max_length""" , )
def lowerCAmelCase_ ( self : str ) -> str:
__a = self.get_rust_tokenizer()
__a = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=lowerCamelCase_ )
__a = next(iter(lowerCamelCase_ ) )["""premise"""] # pick up one data
__a = list(sample_data.values() )
__a = list(map(tokenizer.encode , lowerCamelCase_ ) )
__a = [tokenizer.decode(lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ ) for x in output_tokens]
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase_ ( self : List[str] ) -> List[str]:
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 173
|
"""simple docstring"""
from __future__ import annotations
class a :
def __init__( self : List[str] , lowerCamelCase_ : list[list[int]] ) -> Any:
__a = TypeError(
"""Matrices must be formed from a list of zero or more lists containing at """
"""least one and the same number of values, each of which must be of type """
"""int or float.""" )
if len(lowerCamelCase_ ) != 0:
__a = len(rows[0] )
if cols == 0:
raise error
for row in rows:
if len(lowerCamelCase_ ) != cols:
raise error
for value in row:
if not isinstance(lowerCamelCase_ , (int, float) ):
raise error
__a = rows
else:
__a = []
def lowerCAmelCase_ ( self : Dict ) -> list[list[int]]:
return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
@property
def lowerCAmelCase_ ( self : int ) -> int:
return len(self.rows )
@property
def lowerCAmelCase_ ( self : str ) -> int:
return len(self.rows[0] )
@property
def lowerCAmelCase_ ( self : Optional[Any] ) -> tuple[int, int]:
return (self.num_rows, self.num_columns)
@property
def lowerCAmelCase_ ( self : List[Any] ) -> bool:
return self.order[0] == self.order[1]
def lowerCAmelCase_ ( self : Any ) -> Matrix:
__a = [
[0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
for row_num in range(self.num_rows )
]
return Matrix(lowerCamelCase_ )
def lowerCAmelCase_ ( self : List[str] ) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
def lowerCAmelCase_ ( self : Any ) -> bool:
return bool(self.determinant() )
def lowerCAmelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int ) -> int:
__a = [
[
self.rows[other_row][other_column]
for other_column in range(self.num_columns )
if other_column != column
]
for other_row in range(self.num_rows )
if other_row != row
]
return Matrix(lowerCamelCase_ ).determinant()
def lowerCAmelCase_ ( self : str , lowerCamelCase_ : int , lowerCamelCase_ : int ) -> int:
if (row + column) % 2 == 0:
return self.get_minor(lowerCamelCase_ , lowerCamelCase_ )
return -1 * self.get_minor(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase_ ( self : Dict ) -> Matrix:
return Matrix(
[
[self.get_minor(lowerCamelCase_ , lowerCamelCase_ ) for column in range(self.num_columns )]
for row in range(self.num_rows )
] )
def lowerCAmelCase_ ( self : Optional[Any] ) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
def lowerCAmelCase_ ( self : Union[str, Any] ) -> Matrix:
__a = [
[self.cofactors().rows[column][row] for column in range(self.num_columns )]
for row in range(self.num_rows )
]
return Matrix(lowerCamelCase_ )
def lowerCAmelCase_ ( self : List[str] ) -> Matrix:
__a = self.determinant()
if not determinant:
raise TypeError("""Only matrices with a non-zero determinant have an inverse""" )
return self.adjugate() * (1 / determinant)
def __repr__( self : Union[str, Any] ) -> str:
return str(self.rows )
def __str__( self : Optional[int] ) -> str:
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
return (
"["
+ "\n ".join(
[
"""[""" + """. """.join([str(lowerCamelCase_ ) for value in row] ) + """.]"""
for row in self.rows
] )
+ "]"
)
def lowerCAmelCase_ ( self : int , lowerCamelCase_ : list[int] , lowerCamelCase_ : int | None = None ) -> None:
__a = TypeError("""Row must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise type_error
for value in row:
if not isinstance(lowerCamelCase_ , (int, float) ):
raise type_error
if len(lowerCamelCase_ ) != self.num_columns:
raise ValueError(
"""Row must be equal in length to the other rows in the matrix""" )
if position is None:
self.rows.append(lowerCamelCase_ )
else:
__a = self.rows[0:position] + [row] + self.rows[position:]
def lowerCAmelCase_ ( self : int , lowerCamelCase_ : list[int] , lowerCamelCase_ : int | None = None ) -> None:
__a = TypeError(
"""Column must be a list containing all ints and/or floats""" )
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise type_error
for value in column:
if not isinstance(lowerCamelCase_ , (int, float) ):
raise type_error
if len(lowerCamelCase_ ) != self.num_rows:
raise ValueError(
"""Column must be equal in length to the other columns in the matrix""" )
if position is None:
__a = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
else:
__a = [
self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
for i in range(self.num_rows )
]
def __eq__( self : Tuple , lowerCamelCase_ : object ) -> bool:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
return NotImplemented
return self.rows == other.rows
def __ne__( self : str , lowerCamelCase_ : object ) -> bool:
return not self == other
def __neg__( self : List[Any] ) -> Matrix:
return self * -1
def __add__( self : Union[str, Any] , lowerCamelCase_ : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __sub__( self : int , lowerCamelCase_ : Matrix ) -> Matrix:
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
def __mul__( self : Union[str, Any] , lowerCamelCase_ : Matrix | int | float ) -> Matrix:
if isinstance(lowerCamelCase_ , (int, float) ):
return Matrix(
[[int(element * other ) for element in row] for row in self.rows] )
elif isinstance(lowerCamelCase_ , lowerCamelCase_ ):
if self.num_columns != other.num_rows:
raise ValueError(
"""The number of columns in the first matrix must """
"""be equal to the number of rows in the second""" )
return Matrix(
[
[Matrix.dot_product(lowerCamelCase_ , lowerCamelCase_ ) for column in other.columns()]
for row in self.rows
] )
else:
raise TypeError(
"""A Matrix can only be multiplied by an int, float, or another matrix""" )
def __pow__( self : Optional[int] , lowerCamelCase_ : int ) -> Matrix:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise TypeError("""A Matrix can only be raised to the power of an int""" )
if not self.is_square:
raise ValueError("""Only square matrices can be raised to a power""" )
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
__a = self
for _ in range(other - 1 ):
result *= self
return result
@classmethod
def lowerCAmelCase_ ( cls : Any , lowerCamelCase_ : list[int] , lowerCamelCase_ : list[int] ) -> int:
return sum(row[i] * column[i] for i in range(len(lowerCamelCase_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 173
| 1
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
A_ : Union[str, Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
A_ : Optional[int] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print('\n'.join(upper_files) + '\n')
A_ : Any = [file for file in filepaths if ' ' in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print('\n'.join(space_files) + '\n')
A_ : Optional[Any] = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print('\n'.join(hyphen_files) + '\n')
A_ : Any = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print('\n'.join(nodir_files) + '\n')
A_ : Dict = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 57
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
A_ : Optional[Any] = data_utils.TransfoXLTokenizer
A_ : Union[str, Any] = data_utils.TransfoXLCorpus
A_ : Any = data_utils
A_ : Optional[Any] = data_utils
def snake_case (UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> List[str]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(UpperCAmelCase__ , 'rb' ) as fp:
UpperCamelCase_: Union[str, Any] = pickle.load(UpperCAmelCase__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
UpperCamelCase_: Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
UpperCamelCase_: Union[str, Any] = corpus.vocab.__dict__
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: str = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase__ )
UpperCamelCase_: str = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
UpperCamelCase_: Any = os.path.abspath(UpperCAmelCase__ )
UpperCamelCase_: Dict = os.path.abspath(UpperCAmelCase__ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
UpperCamelCase_: List[str] = TransfoXLConfig()
else:
UpperCamelCase_: Optional[int] = TransfoXLConfig.from_json_file(UpperCAmelCase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
UpperCamelCase_: Union[str, Any] = TransfoXLLMHeadModel(UpperCAmelCase__ )
UpperCamelCase_: Tuple = load_tf_weights_in_transfo_xl(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
# Save pytorch-model
UpperCamelCase_: str = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
UpperCamelCase_: Union[str, Any] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''Save PyTorch model to {os.path.abspath(UpperCAmelCase__ )}''' )
torch.save(model.state_dict() , UpperCAmelCase__ )
print(F'''Save configuration file to {os.path.abspath(UpperCAmelCase__ )}''' )
with open(UpperCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ : int = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
A_ : Tuple = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 57
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 517
|
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _lowerCAmelCase ( ctypes.Structure ):
"""simple docstring"""
snake_case_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def __lowerCamelCase ( ) -> Optional[int]:
if os.name == "nt":
snake_case = CursorInfo()
snake_case = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__lowerCAmelCase , ctypes.byref(__lowerCAmelCase ) )
snake_case = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(__lowerCAmelCase , ctypes.byref(__lowerCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25l""" )
sys.stdout.flush()
def __lowerCamelCase ( ) -> Tuple:
if os.name == "nt":
snake_case = CursorInfo()
snake_case = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(__lowerCAmelCase , ctypes.byref(__lowerCAmelCase ) )
snake_case = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(__lowerCAmelCase , ctypes.byref(__lowerCAmelCase ) )
elif os.name == "posix":
sys.stdout.write("""\033[?25h""" )
sys.stdout.flush()
@contextmanager
def __lowerCamelCase ( ) -> Optional[Any]:
try:
hide_cursor()
yield
finally:
show_cursor()
| 517
| 1
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( UpperCamelCase__ , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
@property
def A__ ( self :int ):
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : int =ort.SessionOptions()
__magic_name__ : int =False
return options
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Optional[Any] =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__magic_name__ : Any =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__magic_name__ : Union[str, Any] =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : Tuple ="""A red cat sitting on a park bench"""
__magic_name__ : int =np.random.RandomState(0 )
__magic_name__ : List[str] =pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=10 , generator=__snake_case , output_type="""np""" , )
__magic_name__ : Union[str, Any] =output.images
__magic_name__ : str =images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__magic_name__ : Tuple =np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : Tuple =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__magic_name__ : Tuple =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__magic_name__ : Optional[int] =LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__magic_name__ : Optional[Any] =OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=__snake_case , safety_checker=__snake_case , feature_extractor=__snake_case , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__ : str ="""A red cat sitting on a park bench"""
__magic_name__ : Optional[int] =np.random.RandomState(0 )
__magic_name__ : Optional[int] =pipe(
prompt=__snake_case , image=__snake_case , mask_image=__snake_case , guidance_scale=7.5 , num_inference_steps=20 , generator=__snake_case , output_type="""np""" , )
__magic_name__ : Union[str, Any] =output.images
__magic_name__ : Union[str, Any] =images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
__magic_name__ : Any =np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 21
|
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
A__ : Optional[Any] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
A__ : Any = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
A__ : List[str] = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any ) -> Tuple:
return float((preds == labels).mean() )
def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] ) -> Optional[Any]:
lowerCamelCase_ : int =simple_accuracy(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] =float(fa_score(y_true=lowerCamelCase__ , y_pred=lowerCamelCase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _snake_case ( lowerCamelCase__ : Tuple , lowerCamelCase__ : str ) -> int:
lowerCamelCase_ : Any =np.array(lowerCamelCase__ )
lowerCamelCase_ : int =np.array(lowerCamelCase__ )
lowerCamelCase_ : Optional[Any] =en_sentvecs.shape[0]
# mean centering
lowerCamelCase_ : int =en_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
lowerCamelCase_ : Dict =in_sentvecs - np.mean(lowerCamelCase__ , axis=0 )
lowerCamelCase_ : Dict =cdist(lowerCamelCase__ , lowerCamelCase__ , "cosine" )
lowerCamelCase_ : str =np.array(range(lowerCamelCase__ ) )
lowerCamelCase_ : Any =sim.argsort(axis=1 )[:, :10]
lowerCamelCase_ : Optional[Any] =np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowercase__ ( datasets.Metric ):
def UpperCAmelCase__ ( self : Optional[Any] ):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
"references": datasets.Value("int64" )
if self.config_name != "cvit-mkb-clsr"
else datasets.Sequence(datasets.Value("float32" ) ),
} ) , codebase_urls=[] , reference_urls=[] , format="numpy" if self.config_name != "cvit-mkb-clsr" else None , )
def UpperCAmelCase__ ( self : List[str] , snake_case__ : Dict , snake_case__ : Optional[Any] ):
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(snake_case__ , snake_case__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(snake_case__ , snake_case__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(snake_case__ , snake_case__ )}
else:
raise KeyError(
"You should supply a configuration name selected in "
"[\"wnli\", \"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", "
"\"cvit-mkb-clsr\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\", "
"\"wiki-ner\"]" )
| 153
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ : str = {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Union[str, Any] = [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : int = [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Dict = [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 712
|
'''simple docstring'''
import functools
from typing import Any
def __a ( __lowerCamelCase : str , __lowerCamelCase : list[str] ) -> bool:
'''simple docstring'''
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or len(__lowerCamelCase ) == 0:
raise ValueError("the string should be not empty string" )
if not isinstance(__lowerCamelCase , __lowerCamelCase ) or not all(
isinstance(__lowerCamelCase , __lowerCamelCase ) and len(__lowerCamelCase ) > 0 for item in words ):
raise ValueError("the words should be a list of non-empty strings" )
# Build trie
lowercase_ = {}
lowercase_ = "WORD_KEEPER"
for word in words:
lowercase_ = trie
for c in word:
if c not in trie_node:
lowercase_ = {}
lowercase_ = trie_node[c]
lowercase_ = True
lowercase_ = len(__lowerCamelCase )
# Dynamic programming method
@functools.cache
def is_breakable(__lowerCamelCase : int ) -> bool:
if index == len_string:
return True
lowercase_ = trie
for i in range(__lowerCamelCase , __lowerCamelCase ):
lowercase_ = trie_node.get(string[i] , __lowerCamelCase )
if trie_node is None:
return False
if trie_node.get(__lowerCamelCase , __lowerCamelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 461
| 0
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def lowerCAmelCase__ ( UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] )-> Optional[int]:
A__ = multiprocessing.Manager()
A__ = manager.list()
A__ = multiprocessing.Process(target=UpperCamelCase_ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('''timed out''' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[Any] )-> Optional[Any]:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
A__ = shutil.rmtree
A__ = os.rmdir
A__ = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
A__ = {}
with swallow_io():
with time_limit(UpperCamelCase_ ):
exec(UpperCamelCase_ , UpperCamelCase_ )
result.append('''passed''' )
except TimeoutException:
result.append('''timed out''' )
except BaseException as e:
result.append(f"failed: {e}" )
# Needed for cleaning up.
A__ = rmtree
A__ = rmdir
A__ = chdir
@contextlib.contextmanager
def lowerCAmelCase__ ( UpperCamelCase_ : Dict )-> int:
def signal_handler(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] ):
raise TimeoutException('''Timed out!''' )
signal.setitimer(signal.ITIMER_REAL , UpperCamelCase_ )
signal.signal(signal.SIGALRM , UpperCamelCase_ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
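# Usage sketch for the time_limit context manager above (hedged: POSIX-only, since
# signal.setitimer/SIGALRM are unavailable on Windows):
#
#     try:
#         with time_limit(2.0):
#             exec(untrusted_program, {})  # any potentially slow work
#     except TimeoutException:
#         print("program exceeded the 2 s budget")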
@contextlib.contextmanager
def lowerCAmelCase__ ( )-> Optional[Any]:
A__ = WriteOnlyStringIO()
with contextlib.redirect_stdout(UpperCamelCase_ ):
with contextlib.redirect_stderr(UpperCamelCase_ ):
with redirect_stdin(UpperCamelCase_ ):
yield
@contextlib.contextmanager
def lowerCAmelCase__ ( )-> List[Any]:
with tempfile.TemporaryDirectory() as dirname:
with chdir(UpperCamelCase_ ):
yield dirname
class _UpperCAmelCase ( A__ ):
pass
class _UpperCAmelCase ( io.StringIO ):
def snake_case_ ( self , *a__ , **a__):
raise OSError
def snake_case_ ( self , *a__ , **a__):
raise OSError
def snake_case_ ( self , *a__ , **a__):
raise OSError
def snake_case_ ( self , *a__ , **a__):
return False
class _UpperCAmelCase ( contextlib._RedirectStream ): # type: ignore
UpperCamelCase__ = '''stdin'''
@contextlib.contextmanager
def lowerCAmelCase__ ( UpperCamelCase_ : str )-> str:
if root == ".":
yield
return
A__ = os.getcwd()
os.chdir(UpperCamelCase_ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(UpperCamelCase_ )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict=None )-> int:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
A__ = None
A__ = None
import os
A__ = '''1'''
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
import shutil
A__ = None
A__ = None
A__ = None
import subprocess
A__ = None # type: ignore
A__ = None
import sys
A__ = None
A__ = None
A__ = None
A__ = None
A__ = None
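# Hypothetical call sketch: cap the sandboxed process at ~256 MiB before exec'ing
# an untrusted program (the rlimit calls above only take effect on Unix):
#
#     reliability_guard(maximum_memory_bytes=256 * 1024 * 1024)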
| 632
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["LayoutLMv2FeatureExtractor"]
_lowercase = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 632
| 1
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def A__ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase: List[str] = tempfile.mkdtemp()
UpperCAmelCase: Tuple = BlipImageProcessor()
UpperCAmelCase: Optional[Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
UpperCAmelCase: Optional[Any] = BlipProcessor(__snake_case , __snake_case )
processor.save_pretrained(self.tmpdirname )
def A__ ( self , **__snake_case ) -> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__snake_case ).tokenizer
def A__ ( self , **__snake_case ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__snake_case ).image_processor
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def A__ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase: str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
UpperCAmelCase: Union[str, Any] = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase: Optional[Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase: Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCAmelCase: List[Any] = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
UpperCAmelCase: int = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def A__ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase: str = self.get_image_processor()
UpperCAmelCase: str = self.get_tokenizer()
UpperCAmelCase: Optional[Any] = BlipProcessor(tokenizer=__snake_case , image_processor=__snake_case )
UpperCAmelCase: int = self.prepare_image_inputs()
UpperCAmelCase: Union[str, Any] = image_processor(__snake_case , return_tensors="np" )
UpperCAmelCase: Tuple = processor(images=__snake_case , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase: Any = self.get_image_processor()
UpperCAmelCase: Optional[int] = self.get_tokenizer()
UpperCAmelCase: Optional[Any] = BlipProcessor(tokenizer=__snake_case , image_processor=__snake_case )
UpperCAmelCase: Optional[int] = "lower newer"
UpperCAmelCase: str = processor(text=__snake_case )
UpperCAmelCase: Optional[Any] = tokenizer(__snake_case , return_token_type_ids=__snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase: int = self.get_image_processor()
UpperCAmelCase: Dict = self.get_tokenizer()
UpperCAmelCase: str = BlipProcessor(tokenizer=__snake_case , image_processor=__snake_case )
UpperCAmelCase: Optional[Any] = "lower newer"
UpperCAmelCase: int = self.prepare_image_inputs()
UpperCAmelCase: List[Any] = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(__snake_case ):
processor()
def A__ ( self ) -> int:
"""simple docstring"""
UpperCAmelCase: str = self.get_image_processor()
UpperCAmelCase: Tuple = self.get_tokenizer()
UpperCAmelCase: Optional[int] = BlipProcessor(tokenizer=__snake_case , image_processor=__snake_case )
UpperCAmelCase: Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase: Tuple = processor.batch_decode(__snake_case )
UpperCAmelCase: Union[str, Any] = tokenizer.batch_decode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def A__ ( self ) -> Tuple:
"""simple docstring"""
UpperCAmelCase: List[Any] = self.get_image_processor()
UpperCAmelCase: List[str] = self.get_tokenizer()
UpperCAmelCase: Optional[int] = BlipProcessor(tokenizer=__snake_case , image_processor=__snake_case )
UpperCAmelCase: Optional[int] = "lower newer"
UpperCAmelCase: Union[str, Any] = self.prepare_image_inputs()
UpperCAmelCase: Any = processor(text=__snake_case , images=__snake_case )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
| 166
|
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase: Optional[Any] = ""
for i in table:
res += inp[i - 1]
return res
def __UpperCAmelCase ( snake_case_ : Optional[Any] ):
'''simple docstring'''
return data[1:] + data[0]
def __UpperCAmelCase ( snake_case_ : Any , snake_case_ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase: Optional[int] = ""
for i in range(len(snake_case_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __UpperCAmelCase ( snake_case_ : int , snake_case_ : Tuple ):
'''simple docstring'''
UpperCAmelCase: List[str] = int("0b" + data[0] + data[-1] , 2 )
UpperCAmelCase: List[Any] = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def __UpperCAmelCase ( snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase: Tuple = message[:4]
UpperCAmelCase: List[str] = message[4:]
UpperCAmelCase: str = apply_table(snake_case_ , snake_case_ )
UpperCAmelCase: Dict = xor(snake_case_ , snake_case_ )
UpperCAmelCase: Dict = apply_sbox(snake_case_ , temp[:4] ) # noqa: E741
UpperCAmelCase: Any = apply_sbox(snake_case_ , temp[4:] )
UpperCAmelCase: List[Any] = "0" * (2 - len(snake_case_ )) + l # noqa: E741
UpperCAmelCase: Any = "0" * (2 - len(snake_case_ )) + r
UpperCAmelCase: Union[str, Any] = apply_table(l + r , snake_case_ )
UpperCAmelCase: List[Any] = xor(snake_case_ , snake_case_ )
return temp + right
if __name__ == "__main__":
snake_case_ : List[Any] = input('Enter 10 bit key: ')
snake_case_ : List[Any] = input('Enter 8 bit message: ')
snake_case_ : Dict = [6, 3, 7, 4, 8, 5, 1_0, 9]
snake_case_ : Optional[int] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
snake_case_ : Union[str, Any] = [2, 4, 3, 1]
snake_case_ : Union[str, Any] = [2, 6, 3, 1, 4, 8, 5, 7]
snake_case_ : Optional[int] = [4, 1, 3, 5, 7, 2, 8, 6]
snake_case_ : Union[str, Any] = [4, 1, 2, 3, 2, 3, 4, 1]
snake_case_ : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
snake_case_ : Optional[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
snake_case_ : Optional[int] = apply_table(key, paa_table)
snake_case_ : Dict = temp[:5]
snake_case_ : Union[str, Any] = temp[5:]
snake_case_ : str = left_shift(left)
snake_case_ : Dict = left_shift(right)
snake_case_ : Tuple = apply_table(left + right, pa_table)
snake_case_ : Dict = left_shift(left)
snake_case_ : int = left_shift(right)
snake_case_ : List[str] = left_shift(left)
snake_case_ : List[Any] = left_shift(right)
snake_case_ : Optional[int] = apply_table(left + right, pa_table)
# encryption
snake_case_ : List[Any] = apply_table(message, IP)
snake_case_ : Any = function(expansion, sa, sa, keya, temp)
snake_case_ : int = temp[4:] + temp[:4]
snake_case_ : int = function(expansion, sa, sa, keya, temp)
snake_case_ : Optional[int] = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
snake_case_ : Tuple = apply_table(CT, IP)
snake_case_ : List[str] = function(expansion, sa, sa, keya, temp)
snake_case_ : int = temp[4:] + temp[:4]
snake_case_ : Tuple = function(expansion, sa, sa, keya, temp)
snake_case_ : Tuple = apply_table(temp, IP_inv)
print('Plain text after decrypting is:', PT)
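# Worked example for the permutation helper above: apply_table("1010", [2, 4, 3, 1])
# selects input bits 2, 4, 3, 1 (1-indexed), yielding "0011". The same helper drives
# key generation (the P10/P8 tables) and each Feistel round (expansion and P4).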
| 166
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ):
a__ : int = KandinskyImgaImgPipeline
a__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
a__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
a__ : Tuple = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
a__ : Optional[Any] = False
@property
def a ( self : Tuple ):
return 32
@property
def a ( self : int ):
return 32
@property
def a ( self : Optional[Any] ):
return self.time_input_dim
@property
def a ( self : Any ):
return self.time_input_dim * 4
@property
def a ( self : Union[str, Any] ):
return 1_00
@property
def a ( self : List[Any] ):
__UpperCAmelCase = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def a ( self : Union[str, Any] ):
torch.manual_seed(0 )
__UpperCAmelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__UpperCAmelCase = MultilingualCLIP(_lowercase )
__UpperCAmelCase = text_encoder.eval()
return text_encoder
@property
def a ( self : Any ):
torch.manual_seed(0 )
__UpperCAmelCase = {
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
__UpperCAmelCase = UNetaDConditionModel(**_lowercase )
return model
@property
def a ( self : List[str] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self : Tuple ):
torch.manual_seed(0 )
__UpperCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self : Optional[int] ):
__UpperCAmelCase = self.dummy_text_encoder
__UpperCAmelCase = self.dummy_tokenizer
__UpperCAmelCase = self.dummy_unet
__UpperCAmelCase = self.dummy_movq
__UpperCAmelCase = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
__UpperCAmelCase = DDIMScheduler(**_lowercase )
__UpperCAmelCase = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def a ( self : Optional[int] , _lowercase : List[str] , _lowercase : List[str]=0 ):
__UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowercase )
# create init_image
__UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
__UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCAmelCase = Image.fromarray(np.uinta(_lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(_lowercase ).startswith('''mps''' ):
__UpperCAmelCase = torch.manual_seed(_lowercase )
else:
__UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__UpperCAmelCase = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def a ( self : List[Any] ):
__UpperCAmelCase = '''cpu'''
__UpperCAmelCase = self.get_dummy_components()
__UpperCAmelCase = self.pipeline_class(**_lowercase )
__UpperCAmelCase = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = pipe(**self.get_dummy_inputs(_lowercase ) )
__UpperCAmelCase = output.images
__UpperCAmelCase = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
__UpperCAmelCase = image[0, -3:, -3:, -1]
__UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCAmelCase = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : List[str] ):
__UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
__UpperCAmelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
__UpperCAmelCase = '''A red cartoon frog, 4k'''
__UpperCAmelCase = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
__UpperCAmelCase = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
__UpperCAmelCase = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
__UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
__UpperCAmelCase , __UpperCAmelCase = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
__UpperCAmelCase = pipeline(
_lowercase , image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
__UpperCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
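# End-to-end usage mirrors the slow test above: KandinskyPriorPipeline maps the
# prompt to (image_embeds, negative_image_embeds), which are then passed to
# KandinskyImgaImgPipeline together with the init image and a strength in (0, 1]
# that controls how strongly the init image is re-noised before denoising.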
| 49
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
UpperCAmelCase_ : int = logging.get_logger(__name__)
UpperCAmelCase_ : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase_ : List[str] = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
UpperCAmelCase_ : str = {
'google/realm-cc-news-pretrained-embedder': 5_1_2,
'google/realm-cc-news-pretrained-encoder': 5_1_2,
'google/realm-cc-news-pretrained-scorer': 5_1_2,
'google/realm-cc-news-pretrained-openqa': 5_1_2,
'google/realm-orqa-nq-openqa': 5_1_2,
'google/realm-orqa-nq-reader': 5_1_2,
'google/realm-orqa-wq-openqa': 5_1_2,
'google/realm-orqa-wq-reader': 5_1_2,
}
UpperCAmelCase_ : str = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class _lowerCamelCase ( snake_case_ ):
'''simple docstring'''
__lowercase : Union[str, Any] = VOCAB_FILES_NAMES
__lowercase : int = PRETRAINED_VOCAB_FILES_MAP
__lowercase : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
__lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase : List[str] = RealmTokenizer
def __init__( self , __lowercase=None , __lowercase=None , __lowercase=True , __lowercase="[UNK]" , __lowercase="[SEP]" , __lowercase="[PAD]" , __lowercase="[CLS]" , __lowercase="[MASK]" , __lowercase=True , __lowercase=None , **__lowercase , ):
"""simple docstring"""
super().__init__(
__lowercase , tokenizer_file=__lowercase , do_lower_case=__lowercase , unk_token=__lowercase , sep_token=__lowercase , pad_token=__lowercase , cls_token=__lowercase , mask_token=__lowercase , tokenize_chinese_chars=__lowercase , strip_accents=__lowercase , **__lowercase , )
__A : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , __lowercase ) != do_lower_case
or normalizer_state.get('strip_accents' , __lowercase ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , __lowercase ) != tokenize_chinese_chars
):
__A : Tuple = getattr(__lowercase , normalizer_state.pop('type' ) )
__A : Optional[int] = do_lower_case
__A : List[str] = strip_accents
__A : Dict = tokenize_chinese_chars
__A : List[Any] = normalizer_class(**__lowercase )
__A : int = do_lower_case
def snake_case__ ( self , __lowercase , **__lowercase ):
"""simple docstring"""
__A : Dict = PaddingStrategy.MAX_LENGTH
__A : Optional[int] = text
__A : Union[str, Any] = kwargs.pop('text_pair' , __lowercase )
__A : List[str] = kwargs.pop('return_tensors' , __lowercase )
__A : int = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(__lowercase ):
if batch_text_pair is not None:
__A : Tuple = batch_text_pair[idx]
else:
__A : Union[str, Any] = None
__A : Optional[int] = super().__call__(__lowercase , __lowercase , return_tensors=__lowercase , **__lowercase )
__A : str = encoded_candidates.get('input_ids' )
__A : Union[str, Any] = encoded_candidates.get('attention_mask' )
__A : Optional[int] = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(__lowercase )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(__lowercase )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(__lowercase )
__A : List[str] = {key: item for key, item in output_data.items() if len(__lowercase ) != 0}
return BatchEncoding(__lowercase , tensor_type=__lowercase )
def snake_case__ ( self , __lowercase , __lowercase=None ):
"""simple docstring"""
__A : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : Union[str, Any] = [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def snake_case__ ( self , __lowercase , __lowercase = None ):
"""simple docstring"""
__A : int = self._tokenizer.model.save(__lowercase , name=__lowercase )
return tuple(__lowercase )
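# Usage sketch (hedged: in upstream transformers this __call__ override is named
# batch_encode_candidates; it pads every candidate to max_length so a batch of
# candidate lists can be stacked into one tensor):
#
#     tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#     batch = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
#     encoded = tokenizer.batch_encode_candidates(batch, max_length=10, return_tensors="pt")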
| 365
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : int = 'speech_to_text'
lowercase__ : List[Any] = ['past_key_values']
lowercase__ : Optional[int] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , lowerCamelCase__=1_0_0_0_0 , lowerCamelCase__=1_2 , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=4 , lowerCamelCase__=6 , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=4 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=2_5_6 , lowerCamelCase__=0.1 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__=6_0_0_0 , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=2 , lowerCamelCase__=(5, 5) , lowerCamelCase__=1_0_2_4 , lowerCamelCase__=8_0 , lowerCamelCase__=1 , **lowerCamelCase__ , ):
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = encoder_ffn_dim
_lowerCamelCase = encoder_layers
_lowerCamelCase = encoder_attention_heads
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = dropout
_lowerCamelCase = attention_dropout
_lowerCamelCase = activation_dropout
_lowerCamelCase = activation_function
_lowerCamelCase = init_std
_lowerCamelCase = encoder_layerdrop
_lowerCamelCase = decoder_layerdrop
_lowerCamelCase = use_cache
_lowerCamelCase = encoder_layers
_lowerCamelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase = max_source_positions
_lowerCamelCase = max_target_positions
_lowerCamelCase = num_conv_layers
_lowerCamelCase = list(lowerCamelCase__ )
_lowerCamelCase = conv_channels
_lowerCamelCase = input_feat_per_channel
_lowerCamelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , decoder_start_token_id=lowerCamelCase__ , **lowerCamelCase__ , )
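# Minimal instantiation sketch (hedged: assumes the upstream Speech2TextConfig /
# Speech2TextModel names; the values shown match the defaults above):
#
#     from transformers import Speech2TextConfig, Speech2TextModel
#     config = Speech2TextConfig(vocab_size=10000, encoder_layers=12, decoder_layers=6)
#     model = Speech2TextModel(config)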
| 623
|
"""simple docstring"""
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=9_9 , lowerCamelCase__=1_3 , lowerCamelCase__=1_6 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=True , lowerCamelCase__=2 , lowerCamelCase__=3_2 , lowerCamelCase__=4 , lowerCamelCase__=4 , lowerCamelCase__=3_0 , lowerCamelCase__=0 , lowerCamelCase__=1 , lowerCamelCase__=2 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = decoder_seq_length
# For common tests
_lowerCamelCase = self.decoder_seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_attention_mask
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = d_model
_lowerCamelCase = d_model
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_layers
_lowerCamelCase = decoder_ffn_dim
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = decoder_attention_heads
_lowerCamelCase = eos_token_id
_lowerCamelCase = bos_token_id
_lowerCamelCase = pad_token_id
_lowerCamelCase = decoder_start_token_id
_lowerCamelCase = use_cache
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = None
_lowerCamelCase = decoder_seq_length
_lowerCamelCase = 2
_lowerCamelCase = 1
def snake_case__ ( self ):
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_attention_mask:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_lowerCamelCase = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ):
_lowerCamelCase = True
_lowerCamelCase = TrOCRDecoder(config=lowerCamelCase__ ).to(lowerCamelCase__ ).eval()
_lowerCamelCase = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , use_cache=lowerCamelCase__ )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) )
self.parent.assertTrue(len(lowerCamelCase__ ) == len(lowerCamelCase__ ) + 1 )
_lowerCamelCase = outputs['''past_key_values''']
# create hypothetical next token and extend to next_input_ids
_lowerCamelCase = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the next tokens to input_ids
_lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_lowerCamelCase = model(lowerCamelCase__ )['''last_hidden_state''']
_lowerCamelCase = model(lowerCamelCase__ , past_key_values=lowerCamelCase__ )['''last_hidden_state''']
# select random slice
_lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_lowerCamelCase = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_lowerCamelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1e-3 )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase_( A__, A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowercase__ : List[str] = (TrOCRForCausalLM,) if is_torch_available() else ()
lowercase__ : Tuple = {'text-generation': TrOCRForCausalLM} if is_torch_available() else {}
lowercase__ : Dict = True
lowercase__ : Optional[Any] = False
def snake_case__ ( self ):
_lowerCamelCase = TrOCRStandaloneDecoderModelTester(self , is_training=lowerCamelCase__ )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
self.config_tester.run_common_tests()
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*lowerCamelCase__ )
def snake_case__ ( self ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def snake_case__ ( self ):
pass
| 623
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
SCREAMING_SNAKE_CASE_ = {
'google/realm-cc-news-pretrained-embedder': 5_12,
'google/realm-cc-news-pretrained-encoder': 5_12,
'google/realm-cc-news-pretrained-scorer': 5_12,
'google/realm-cc-news-pretrained-openqa': 5_12,
'google/realm-orqa-nq-openqa': 5_12,
'google/realm-orqa-nq-reader': 5_12,
'google/realm-orqa-wq-openqa': 5_12,
'google/realm-orqa-wq-reader': 5_12,
}
SCREAMING_SNAKE_CASE_ = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RealmTokenizer
def __init__( self , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_=True , snake_case_=None , **snake_case_ , ):
'''simple docstring'''
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , do_lower_case=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , pad_token=snake_case_ , cls_token=snake_case_ , mask_token=snake_case_ , tokenize_chinese_chars=snake_case_ , strip_accents=snake_case_ , **snake_case_ , )
__UpperCAmelCase: Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , snake_case_ ) != do_lower_case
or normalizer_state.get("""strip_accents""" , snake_case_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , snake_case_ ) != tokenize_chinese_chars
):
__UpperCAmelCase: Union[str, Any] = getattr(snake_case_ , normalizer_state.pop("""type""" ) )
__UpperCAmelCase: Optional[int] = do_lower_case
__UpperCAmelCase: Optional[Any] = strip_accents
__UpperCAmelCase: Dict = tokenize_chinese_chars
__UpperCAmelCase: int = normalizer_class(**snake_case_ )
__UpperCAmelCase: Dict = do_lower_case
def lowercase_ ( self , snake_case_ , **snake_case_ ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = PaddingStrategy.MAX_LENGTH
__UpperCAmelCase: Tuple = text
__UpperCAmelCase: Dict = kwargs.pop("""text_pair""" , snake_case_ )
__UpperCAmelCase: List[Any] = kwargs.pop("""return_tensors""" , snake_case_ )
__UpperCAmelCase: Optional[int] = {
"""input_ids""": [],
"""attention_mask""": [],
"""token_type_ids""": [],
}
for idx, candidate_text in enumerate(snake_case_ ):
if batch_text_pair is not None:
__UpperCAmelCase: Dict = batch_text_pair[idx]
else:
__UpperCAmelCase: List[Any] = None
__UpperCAmelCase: Tuple = super().__call__(snake_case_ , snake_case_ , return_tensors=snake_case_ , **snake_case_ )
__UpperCAmelCase: Any = encoded_candidates.get("""input_ids""" )
__UpperCAmelCase: Dict = encoded_candidates.get("""attention_mask""" )
__UpperCAmelCase: str = encoded_candidates.get("""token_type_ids""" )
if encoded_input_ids is not None:
output_data["input_ids"].append(snake_case_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(snake_case_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(snake_case_ )
__UpperCAmelCase: Any = {key: item for key, item in output_data.items() if len(snake_case_ ) != 0}
return BatchEncoding(snake_case_ , tensor_type=snake_case_ )
def lowercase_ ( self , snake_case_ , snake_case_=None ):
'''simple docstring'''
__UpperCAmelCase: Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase_ ( self , snake_case_ , snake_case_ = None ):
'''simple docstring'''
__UpperCAmelCase: str = [self.sep_token_id]
__UpperCAmelCase: Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase_ ( self , snake_case_ , snake_case_ = None ):
'''simple docstring'''
__UpperCAmelCase: Tuple = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
| 523
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'tanreinama/GPTSAN-2.8B-spout_is_uniform': (
'https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json'
),
}
class a ( __lowerCAmelCase ):
"""simple docstring"""
__lowerCAmelCase = """gptsan-japanese"""
__lowerCAmelCase = [
"""past_key_values""",
]
__lowerCAmelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self , snake_case_=3_6000 , snake_case_=1280 , snake_case_=1024 , snake_case_=8192 , snake_case_=4096 , snake_case_=128 , snake_case_=10 , snake_case_=0 , snake_case_=16 , snake_case_=16 , snake_case_=128 , snake_case_=0.0 , snake_case_=1e-5 , snake_case_=False , snake_case_=0.0 , snake_case_="float32" , snake_case_=False , snake_case_=False , snake_case_=False , snake_case_=0.0_0_2 , snake_case_=False , snake_case_=True , snake_case_=3_5998 , snake_case_=3_5995 , snake_case_=3_5999 , **snake_case_ , ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = vocab_size
__UpperCAmelCase: List[str] = max_position_embeddings
__UpperCAmelCase: List[Any] = d_model
__UpperCAmelCase: List[str] = d_ff
__UpperCAmelCase: Union[str, Any] = d_ext
__UpperCAmelCase: List[Any] = d_spout
__UpperCAmelCase: Dict = num_switch_layers
__UpperCAmelCase: List[str] = num_ext_layers
__UpperCAmelCase: Tuple = num_switch_layers + num_ext_layers
__UpperCAmelCase: Any = num_heads
__UpperCAmelCase: Optional[Any] = num_experts
__UpperCAmelCase: Tuple = expert_capacity
__UpperCAmelCase: Tuple = dropout_rate
__UpperCAmelCase: Optional[int] = layer_norm_epsilon
__UpperCAmelCase: Union[str, Any] = router_bias
__UpperCAmelCase: Optional[Any] = router_jitter_noise
__UpperCAmelCase: str = router_dtype
__UpperCAmelCase: Union[str, Any] = router_ignore_padding_tokens
__UpperCAmelCase: Optional[int] = output_hidden_states
__UpperCAmelCase: Optional[Any] = output_attentions
__UpperCAmelCase: Any = initializer_factor
__UpperCAmelCase: Tuple = output_router_logits
__UpperCAmelCase: Tuple = use_cache
super().__init__(
separator_token_id=snake_case_ , pad_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ , )
| 523
| 1
|
'''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def __lowerCamelCase ( _lowercase ) -> List[str]:
UpperCAmelCase : Tuple = SwinConfig()
UpperCAmelCase : Optional[Any] = swin_name.split("""_""" )
UpperCAmelCase : Any = name_split[1]
UpperCAmelCase : Dict = int(name_split[4] )
UpperCAmelCase : Optional[int] = int(name_split[3][-1] )
if model_size == "tiny":
UpperCAmelCase : Optional[Any] = 9_6
UpperCAmelCase : Union[str, Any] = (2, 2, 6, 2)
UpperCAmelCase : Union[str, Any] = (3, 6, 1_2, 2_4)
elif model_size == "small":
UpperCAmelCase : Union[str, Any] = 9_6
UpperCAmelCase : List[str] = (2, 2, 1_8, 2)
UpperCAmelCase : List[Any] = (3, 6, 1_2, 2_4)
elif model_size == "base":
UpperCAmelCase : Tuple = 1_2_8
UpperCAmelCase : List[Any] = (2, 2, 1_8, 2)
UpperCAmelCase : Any = (4, 8, 1_6, 3_2)
else:
UpperCAmelCase : Optional[Any] = 1_9_2
UpperCAmelCase : Tuple = (2, 2, 1_8, 2)
UpperCAmelCase : Dict = (6, 1_2, 2_4, 4_8)
if "in22k" in swin_name:
UpperCAmelCase : str = 2_1_8_4_1
else:
UpperCAmelCase : Optional[Any] = 1_0_0_0
UpperCAmelCase : Optional[int] = """huggingface/label-files"""
UpperCAmelCase : Dict = """imagenet-1k-id2label.json"""
UpperCAmelCase : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : str = {int(_lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase : List[Any] = idalabel
UpperCAmelCase : Optional[Any] = {v: k for k, v in idalabel.items()}
UpperCAmelCase : str = img_size
UpperCAmelCase : Union[str, Any] = num_classes
UpperCAmelCase : Union[str, Any] = embed_dim
UpperCAmelCase : List[str] = depths
UpperCAmelCase : Dict = num_heads
UpperCAmelCase : List[Any] = window_size
return config
def __lowerCamelCase ( _lowercase ) -> Any:
if "patch_embed.proj" in name:
UpperCAmelCase : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
UpperCAmelCase : int = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
UpperCAmelCase : Union[str, Any] = """encoder.""" + name
if "attn.proj" in name:
UpperCAmelCase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
UpperCAmelCase : List[Any] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
UpperCAmelCase : Tuple = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
UpperCAmelCase : Any = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
UpperCAmelCase : Tuple = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
UpperCAmelCase : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "norm.weight":
UpperCAmelCase : int = """layernorm.weight"""
if name == "norm.bias":
UpperCAmelCase : List[str] = """layernorm.bias"""
if "head" in name:
UpperCAmelCase : Optional[int] = name.replace("""head""" , """classifier""" )
else:
UpperCAmelCase : str = """swin.""" + name
return name
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[int]:
for key in orig_state_dict.copy().keys():
UpperCAmelCase : List[str] = orig_state_dict.pop(_lowercase )
if "mask" in key:
continue
elif "qkv" in key:
UpperCAmelCase : Tuple = key.split(""".""" )
UpperCAmelCase : List[Any] = int(key_split[1] )
UpperCAmelCase : str = int(key_split[3] )
UpperCAmelCase : Optional[Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase : str = val[:dim, :]
UpperCAmelCase : Optional[Any] = val[
dim : dim * 2, :
]
UpperCAmelCase : List[Any] = val[-dim:, :]
else:
UpperCAmelCase : Optional[Any] = val[
:dim
]
UpperCAmelCase : Dict = val[
dim : dim * 2
]
UpperCAmelCase : Tuple = val[
-dim:
]
else:
UpperCAmelCase : str = val
return orig_state_dict
def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]:
UpperCAmelCase : Dict = timm.create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
UpperCAmelCase : Tuple = get_swin_config(_lowercase )
UpperCAmelCase : Optional[Any] = SwinForImageClassification(_lowercase )
model.eval()
UpperCAmelCase : Optional[Any] = convert_state_dict(timm_model.state_dict() , _lowercase )
model.load_state_dict(_lowercase )
UpperCAmelCase : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swin_name.replace("""_""" , """-""" ) ) )
UpperCAmelCase : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
UpperCAmelCase : List[str] = image_processor(images=_lowercase , return_tensors="""pt""" )
UpperCAmelCase : List[Any] = timm_model(inputs["""pixel_values"""] )
UpperCAmelCase : str = model(**_lowercase ).logits
assert torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print(F'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a : List[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
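# Example invocation (hedged: the script filename and output path are illustrative):
#
#     python convert_swin_timm_to_pytorch.py \
#         --swin_name swin_tiny_patch4_window7_224 \
#         --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224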
| 672
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
a : List[str] = logging.get_logger(__name__)
class UpperCamelCase_ ( __magic_name__ ):
def _lowercase( self , A ) -> Optional[int]:
if isinstance(A , A ):
UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split(""",""" ) if label.strip()]
return labels
def __call__( self , A , A , A ) -> str:
if len(A ) == 0 or len(A ) == 0:
raise ValueError("""You must include at least one label and at least one sequence.""" )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"""The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. """
"""Make sure the passed template includes formatting syntax such as {{}} where the label should go."""
).format(A ) )
if isinstance(A , A ):
UpperCAmelCase : Tuple = [sequences]
UpperCAmelCase : Optional[Any] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(A )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__magic_name__ )
class UpperCamelCase_ ( __magic_name__ ):
def __init__( self , A=ZeroShotClassificationArgumentHandler() , *A , **A ) -> Optional[int]:
UpperCAmelCase : Tuple = args_parser
super().__init__(*A , **A )
if self.entailment_id == -1:
logger.warning(
"""Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to """
"""-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.""" )
@property
def _lowercase( self ) -> List[Any]:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("""entail""" ):
return ind
return -1
def _lowercase( self , A , A=True , A=True , A=TruncationStrategy.ONLY_FIRST , **A ) -> str:
UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"""Tokenizer was not supporting padding necessary for zero-shot, attempting to use """
""" `pad_token=eos_token`""" )
UpperCAmelCase : Any = self.tokenizer.eos_token
try:
UpperCAmelCase : Tuple = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=A , )
except Exception as e:
if "too short" in str(A ):
# The tokenizer may complain that the requested truncation length
# exceeds the length of the input; in that case we skip truncation.
# Inspecting the exception message seems to be the only reliable way
# to detect this condition.
UpperCAmelCase : List[str] = self.tokenizer(
A , add_special_tokens=A , return_tensors=A , padding=A , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def _lowercase( self , **A ) -> Tuple:
if kwargs.get("""multi_class""" , A ) is not None:
UpperCAmelCase : Any = kwargs["""multi_class"""]
logger.warning(
"""The `multi_class` argument has been deprecated and renamed to `multi_label`. """
"""`multi_class` will be removed in a future version of Transformers.""" )
UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
UpperCAmelCase : Tuple = self._args_parser._parse_labels(kwargs["""candidate_labels"""] )
if "hypothesis_template" in kwargs:
UpperCAmelCase : List[Any] = kwargs["""hypothesis_template"""]
UpperCAmelCase : Dict = {}
if "multi_label" in kwargs:
UpperCAmelCase : Union[str, Any] = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self , A , *A , **A , ) -> Tuple:
if len(A ) == 0:
pass
elif len(A ) == 1 and "candidate_labels" not in kwargs:
UpperCAmelCase : Optional[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(A , **A )
def _lowercase( self , A , A=None , A="This example is {}." ) -> List[Any]:
UpperCAmelCase , UpperCAmelCase : List[Any] = self._args_parser(A , A , A )
for i, (candidate_label, sequence_pair) in enumerate(zip(A , A ) ):
UpperCAmelCase : Any = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A ) - 1,
**model_input,
}
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = inputs["""candidate_label"""]
UpperCAmelCase : Tuple = inputs["""sequence"""]
UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
UpperCAmelCase : Tuple = self.model(**A )
UpperCAmelCase : Optional[int] = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def _lowercase( self , A , A=False ) -> List[str]:
UpperCAmelCase : Dict = [outputs["""candidate_label"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = [outputs["""sequence"""] for outputs in model_outputs]
UpperCAmelCase : List[Any] = np.concatenate([output["""logits"""].numpy() for output in model_outputs] )
UpperCAmelCase : Optional[Any] = logits.shape[0]
UpperCAmelCase : int = len(A )
UpperCAmelCase : List[Any] = N // n
UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCAmelCase : str = self.entailment_id
UpperCAmelCase : str = -1 if entailment_id == 0 else 0
UpperCAmelCase : Optional[Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCAmelCase : int = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
UpperCAmelCase : Optional[int] = np.exp(A ) / np.exp(A ).sum(-1 , keepdims=A )
UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
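# Usage sketch for the zero-shot pipeline implemented above (model name and labels
# are illustrative, using the standard transformers pipeline entry point):
#
#     from transformers import pipeline
#     classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#     classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#         multi_label=False,
#     )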
| 672
| 1