code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =AltDiffusionPipeline
__UpperCAmelCase : str =TEXT_TO_IMAGE_PARAMS
__UpperCAmelCase : Dict =TEXT_TO_IMAGE_BATCH_PARAMS
__UpperCAmelCase : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
__UpperCAmelCase : Any =TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case ( self ):
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_02 , )
__lowerCAmelCase = CLIPTextModel(__a )
__lowerCAmelCase = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
__lowerCAmelCase = 77
__lowerCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case ( self , __a , __a=0 ):
if str(__a ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(__a )
else:
__lowerCAmelCase = torch.Generator(device=__a ).manual_seed(__a )
__lowerCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case ( self ):
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def snake_case ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
torch.manual_seed(0 )
__lowerCAmelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowerCAmelCase = RobertaSeriesModelWithTransformation(__a )
__lowerCAmelCase = text_encoder
__lowerCAmelCase = AltDiffusionPipeline(**__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = self.get_dummy_inputs(__a )
__lowerCAmelCase = "A photo of an astronaut"
__lowerCAmelCase = alt_pipe(**__a )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
__lowerCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = PNDMScheduler(skip_prk_steps=__a )
torch.manual_seed(0 )
__lowerCAmelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=50_02 , )
# TODO: remove after fixing the non-deterministic text encoder
__lowerCAmelCase = RobertaSeriesModelWithTransformation(__a )
__lowerCAmelCase = text_encoder
__lowerCAmelCase = AltDiffusionPipeline(**__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = self.get_dummy_inputs(__a )
__lowerCAmelCase = alt_pipe(**__a )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self ):
# make sure here that pndm scheduler skips prk
__lowerCAmelCase = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = alt_pipe([prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=20 , output_type="np" )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
__lowerCAmelCase = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
__lowerCAmelCase = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=__a , safety_checker=__a )
__lowerCAmelCase = alt_pipe.to(__a )
alt_pipe.set_progress_bar_config(disable=__a )
__lowerCAmelCase = "A painting of a squirrel eating a burger"
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = alt_pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="numpy" )
__lowerCAmelCase = output.images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
__lowerCAmelCase = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 57
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
return generator, ["Something to write", "Something else"]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = generator("Something there" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
__lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
__lowerCAmelCase = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
with self.assertRaises(__a ):
generator(4 )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
__lowerCAmelCase = 3
__lowerCAmelCase = generator(
"Something there" , num_return_sequences=__a , num_beams=__a , )
__lowerCAmelCase = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(__a , __a )
__lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
__lowerCAmelCase = generator.model.config.eos_token_id
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = generator(
["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
@require_tf
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
| 57
| 1
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
while len(_UpperCamelCase ) > 1:
__lowerCAmelCase = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
__lowerCAmelCase = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCAmelCase = model
__lowerCAmelCase = 2
__lowerCAmelCase = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case ( self ):
pass
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = LongformerModel.from_pretrained(_UpperCamelCase )
__lowerCAmelCase = LightningModel(_UpperCamelCase )
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
__lowerCAmelCase = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCamelCase )
print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 57
| 1
|
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
A : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
A : Dict = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
__lowerCAmelCase = self.transformer_dir
shutil.copy(
os.path.join(__a , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def snake_case ( self ):
__lowerCAmelCase = "src/transformers"
shutil.rmtree(self.transformer_dir )
def snake_case ( self , __a , __a , __a , __a=None ):
__lowerCAmelCase = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
__lowerCAmelCase = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
__lowerCAmelCase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
__lowerCAmelCase = black.format_str(__a , mode=__a )
__lowerCAmelCase = os.path.join(self.transformer_dir , "new_code.py" )
with open(__a , "w" , newline="\n" ) as f:
f.write(__a )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__a ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__a )
with open(__a , "r" ) as f:
self.assertTrue(f.read() , __a )
def snake_case ( self ):
__lowerCAmelCase = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(__a , __a )
def snake_case ( self ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , __a , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , __a ) , )
# Copy consistency with a really long name
__lowerCAmelCase = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}" , f"{long_class_name}LMPredictionHead" , re.sub("Bert" , __a , __a ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , __a , overwrite_result=re.sub("Bert" , "TestModel" , __a ) , )
def snake_case ( self ):
__lowerCAmelCase = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
self.assertFalse(__a )
self.assertEqual(__a , __a )
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(__a )
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__lowerCAmelCase = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
__lowerCAmelCase , __lowerCAmelCase = check_copies.convert_to_localized_md(
__a , __a , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(__a , __a )
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
A : Union[str, Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
A : str = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 57
| 1
|
"""simple docstring"""
from math import sqrt
def _lowerCamelCase ( _UpperCamelCase = 100_0000 ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 42
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_UpperCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 57
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[Any] ="""CLIPImageProcessor"""
__UpperCAmelCase : Union[str, Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , __a=None , __a=None , **__a ):
__lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
__lowerCAmelCase = kwargs.pop("feature_extractor" )
__lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self , __a=None , __a=None , __a=None , **__a ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowerCAmelCase = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
__lowerCAmelCase = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
__lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@property
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 57
| 1
|
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[str] =["""image_processor"""]
__UpperCAmelCase : str ="""SamImageProcessor"""
def __init__( self , __a ):
super().__init__(__a )
__lowerCAmelCase = self.image_processor
__lowerCAmelCase = -10
__lowerCAmelCase = self.image_processor.size["longest_edge"]
def __call__( self , __a=None , __a=None , __a=None , __a=None , __a = None , **__a , ):
__lowerCAmelCase = self.image_processor(
__a , return_tensors=__a , **__a , )
# pop arguments that are not used in the foward but used nevertheless
__lowerCAmelCase = encoding_image_processor["original_sizes"]
if hasattr(__a , "numpy" ): # Checks if Torch or TF tensor
__lowerCAmelCase = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._check_and_preprocess_points(
input_points=__a , input_labels=__a , input_boxes=__a , )
__lowerCAmelCase = self._normalize_and_convert(
__a , __a , input_points=__a , input_labels=__a , input_boxes=__a , return_tensors=__a , )
return encoding_image_processor
def snake_case ( self , __a , __a , __a=None , __a=None , __a=None , __a="pt" , ):
if input_points is not None:
if len(__a ) != len(__a ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __a , original_sizes[0] ) for point in input_points
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __a , __a )
for point, original_size in zip(__a , __a )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase = self._pad_points_and_labels(__a , __a )
__lowerCAmelCase = np.array(__a )
if input_labels is not None:
__lowerCAmelCase = np.array(__a )
if input_boxes is not None:
if len(__a ) != len(__a ):
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __a , original_sizes[0] , is_bounding_box=__a )
for box in input_boxes
]
else:
__lowerCAmelCase = [
self._normalize_coordinates(self.target_size , __a , __a , is_bounding_box=__a )
for box, original_size in zip(__a , __a )
]
__lowerCAmelCase = np.array(__a )
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__a )
# boxes batch size of 1 by default
__lowerCAmelCase = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__a )
# boxes batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__a , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__a )
# point batch size of 1 by default
__lowerCAmelCase = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__a )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__a , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"input_points": input_points} )
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase = torch.from_numpy(__a )
# point batch size of 1 by default
__lowerCAmelCase = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase = tf.convert_to_tensor(__a )
# point batch size of 1 by default
__lowerCAmelCase = tf.expand_dims(__a , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels} )
return encoding_image_processor
def snake_case ( self , __a , __a ):
__lowerCAmelCase = max([point.shape[0] for point in input_points] )
__lowerCAmelCase = []
for i, point in enumerate(__a ):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__lowerCAmelCase = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(__a )
__lowerCAmelCase = processed_input_points
return input_points, input_labels
def snake_case ( self , __a , __a , __a , __a=False ):
__lowerCAmelCase , __lowerCAmelCase = original_size
__lowerCAmelCase , __lowerCAmelCase = self.image_processor._get_preprocess_shape(__a , longest_edge=__a )
__lowerCAmelCase = deepcopy(__a ).astype(__a )
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1 , 2 , 2 )
__lowerCAmelCase = coords[..., 0] * (new_w / old_w)
__lowerCAmelCase = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__lowerCAmelCase = coords.reshape(-1 , 4 )
return coords
    def snake_case ( self , __a=None , __a=None , __a=None , ):
        """Validate and normalize raw prompt inputs into lists of numpy
        arrays (or ``None`` when a given input kind is absent).

        NOTE(review): the three keyword parameters share the name ``__a``
        (obfuscation artifact -- a SyntaxError as written); the body reads
        ``input_points``, ``input_labels`` and ``input_boxes``.

        Raises ValueError when an input is not the expected nested-list
        shape.
        """
        if input_points is not None:
            if hasattr(__a , "numpy" ):  # Checks for TF or Torch tensor
                __lowerCAmelCase = input_points.numpy().tolist()
            if not isinstance(__a , __a ) or not isinstance(input_points[0] , __a ):
                raise ValueError("Input points must be a list of list of floating points." )
            __lowerCAmelCase = [np.array(__a ) for input_point in input_points]
        else:
            __lowerCAmelCase = None
        if input_labels is not None:
            if hasattr(__a , "numpy" ):
                __lowerCAmelCase = input_labels.numpy().tolist()
            if not isinstance(__a , __a ) or not isinstance(input_labels[0] , __a ):
                raise ValueError("Input labels must be a list of list integers." )
            __lowerCAmelCase = [np.array(__a ) for label in input_labels]
        else:
            __lowerCAmelCase = None
        if input_boxes is not None:
            if hasattr(__a , "numpy" ):
                __lowerCAmelCase = input_boxes.numpy().tolist()
            if (
                not isinstance(__a , __a )
                or not isinstance(input_boxes[0] , __a )
                or not isinstance(input_boxes[0][0] , __a )
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points." )
            # Boxes are cast to float arrays; points/labels keep numpy's
            # inferred dtype.
            __lowerCAmelCase = [np.array(__a ).astype(np.floataa ) for box in input_boxes]
        else:
            __lowerCAmelCase = None
        return input_points, input_labels, input_boxes
@property
def snake_case ( self ):
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(__a ) )
    def snake_case ( self , *__a , **__a ):
        """Delegate mask post-processing to the underlying image processor.

        NOTE(review): ``*__a , **__a`` reuse the same name (obfuscation
        artifact -- a SyntaxError as written); intended is
        ``(*args, **kwargs)``.
        """
        return self.image_processor.post_process_masks(*__a , **__a )
| 57
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
    """Empty placeholder test class; exists only to exercise the
    ``require_onnxruntime`` decorator (the suite is skipped when the
    onnxruntime package is not installed)."""
    pass
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class _UpperCamelCase :
    """Builds a tiny random BlenderbotSmall config + input batch and checks
    that decoding with cached ``past_key_values`` matches a full forward
    pass on the appended tokens.

    NOTE(review): obfuscation artifacts throughout -- the three
    ``__UpperCAmelCase`` class attributes mangle to one and the same name
    (each assignment overwrites the previous), ``__init__``'s parameters
    are all ``__a`` (a SyntaxError as written), and ``__lowerCAmelCase =``
    assignments are locals that presumably replaced ``self.<field> = ...``,
    so no instance state is actually stored.
    """

    __UpperCAmelCase : Optional[Any] =BlenderbotSmallConfig
    __UpperCAmelCase : Tuple ={}
    __UpperCAmelCase : Dict ="""gelu"""
    # Hyper-parameters for the miniature model under test.
    def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ):
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = eos_token_id
        __lowerCAmelCase = pad_token_id
        __lowerCAmelCase = bos_token_id
    # Build a random (config, inputs_dict) pair; input_ids end with EOS.
    def snake_case ( self ):
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        __lowerCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        __lowerCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCAmelCase = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        __lowerCAmelCase = prepare_blenderbot_small_inputs_dict(__a , __a , __a )
        return config, inputs_dict
    # Decode three extra tokens with and without the KV cache and assert a
    # random slice of the logits matches to 1e-3.
    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = TFBlenderbotSmallModel(config=__a ).get_decoder()
        __lowerCAmelCase = inputs_dict["input_ids"]
        __lowerCAmelCase = input_ids[:1, :]
        __lowerCAmelCase = inputs_dict["attention_mask"][:1, :]
        __lowerCAmelCase = inputs_dict["head_mask"]
        __lowerCAmelCase = 1
        # first forward pass
        __lowerCAmelCase = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
        __lowerCAmelCase , __lowerCAmelCase = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __lowerCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and
        __lowerCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
        __lowerCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        __lowerCAmelCase = model(__a , attention_mask=__a )[0]
        __lowerCAmelCase = model(__a , attention_mask=__a , past_key_values=__a )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        __lowerCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        __lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
        __lowerCAmelCase = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ):
    """Fill in default attention / head masks for a BlenderbotSmall input
    dict, deriving padding masks from ``config.pad_token_id``.

    NOTE(review): every parameter is named ``_UpperCamelCase`` (obfuscation
    artifact -- a SyntaxError as written); the body reads ``config``,
    ``input_ids``, ``decoder_input_ids`` and the four optional masks.
    """
    if attention_mask is None:
        # Mask out padding positions in the encoder inputs.
        __lowerCAmelCase = tf.cast(tf.math.not_equal(_UpperCamelCase , config.pad_token_id ) , tf.inta )
    if decoder_attention_mask is None:
        # First decoder position is always attended (decoder start token).
        __lowerCAmelCase = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
            ] , axis=-1 , )
    if head_mask is None:
        __lowerCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        __lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        __lowerCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
    """Common model/pipeline test harness for TFBlenderbotSmall.

    NOTE(review): the base classes ``lowerCAmelCase__`` are undefined in
    this file (obfuscation artifact; originally TFModelTesterMixin and
    PipelineTesterMixin), all ``__UpperCAmelCase`` class attributes mangle
    to the same name, and setUp references ``TFBlenderbotSmallModelTester``
    although the tester class above was renamed ``_UpperCamelCase``.
    """

    __UpperCAmelCase : str =(
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    __UpperCAmelCase : Dict =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    __UpperCAmelCase : int =(
        {
            """conversational""": TFBlenderbotSmallForConditionalGeneration,
            """feature-extraction""": TFBlenderbotSmallModel,
            """summarization""": TFBlenderbotSmallForConditionalGeneration,
            """text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
            """translation""": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __UpperCAmelCase : str =True
    __UpperCAmelCase : Tuple =False
    __UpperCAmelCase : Dict =False
    # setUp: build the model tester and a common-config tester.
    def snake_case ( self ):
        __lowerCAmelCase = TFBlenderbotSmallModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=__a )
    # Run the shared config sanity checks.
    def snake_case ( self ):
        self.config_tester.run_common_tests()
    # Exercise cached decoding against full decoding.
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*__a )
@require_tokenizers
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
    """Slow integration test: generate a reply with the 90M BlenderbotSmall
    checkpoint and compare against known outputs.

    NOTE(review): the two ``__UpperCAmelCase`` class attributes mangle to
    one name, yet the methods read ``self.src_text`` / ``self.model_name``
    -- the original attribute names were lost in obfuscation.
    """

    __UpperCAmelCase : Optional[int] =[
        """Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
        """ i'm going to throw up.\nand why is that?"""
    ]
    __UpperCAmelCase : Optional[int] ="""facebook/blenderbot_small-90M"""
    @cached_property
    def snake_case ( self ):
        # use "old" tokenizer here because of bug when downloading new tokenizer
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
    @cached_property
    def snake_case ( self ):
        # NOTE(review): the model is bound to a local; ``return model`` would
        # raise NameError as written.
        __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model
    @slow
    def snake_case ( self ):
        # Beam-search a reply and check it is one of the accepted strings.
        __lowerCAmelCase = self.tokenizer(self.src_text , return_tensors="tf" )
        __lowerCAmelCase = self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , )
        __lowerCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 57
|
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
    """Min-heap keyed by vertex distance with an auxiliary position index,
    used by the Prim's-algorithm driver below.

    NOTE(review): several method signatures repeat the parameter name
    ``__a`` (obfuscation artifact -- a SyntaxError as written); the bodies
    read the intended names (``heap``, ``positions``, ``start``, ``size``,
    ``vertex``, ``pos``, ...), so the original signatures can be inferred
    from those.
    """
    def __init__( self ):
        # NOTE(review): local assignment; presumably ``self.node_position = []``.
        __lowerCAmelCase = []
    # get_position(vertex): index of ``vertex`` inside the heap array.
    def snake_case ( self , __a ):
        return self.node_position[vertex]
    # set_position(vertex, pos): record the heap index of ``vertex``.
    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = pos
    # top_to_bottom(heap, start, size, positions): sift the element at
    # ``start`` down, swapping with its smaller child, keeping the
    # position index in sync; recurses into the swapped child.
    def snake_case ( self , __a , __a , __a , __a ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                __lowerCAmelCase = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    __lowerCAmelCase = 2 * start + 1
                else:
                    __lowerCAmelCase = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                __lowerCAmelCase , __lowerCAmelCase = heap[smallest_child], positions[smallest_child]
                __lowerCAmelCase , __lowerCAmelCase = (
                    heap[start],
                    positions[start],
                )
                __lowerCAmelCase , __lowerCAmelCase = temp, tempa
                __lowerCAmelCase = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , __a )
                self.top_to_bottom(__a , __a , __a , __a )
    # bottom_to_top(val, index, heap, position): bubble ``val`` up from
    # ``index`` toward the root until the heap property holds.
    def snake_case ( self , __a , __a , __a , __a ):
        __lowerCAmelCase = position[index]
        while index != 0:
            __lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                __lowerCAmelCase = heap[parent]
                __lowerCAmelCase = position[parent]
                self.set_position(position[parent] , __a )
            else:
                __lowerCAmelCase = val
                __lowerCAmelCase = temp
                self.set_position(__a , __a )
                break
            __lowerCAmelCase = parent
        else:
            # Reached the root: ``val`` belongs at index 0.
            __lowerCAmelCase = val
            __lowerCAmelCase = temp
            self.set_position(__a , 0 )
    # heapify(heap, positions): classic bottom-up heap construction.
    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = len(__a ) // 2 - 1
        for i in range(__a , -1 , -1 ):
            self.top_to_bottom(__a , __a , len(__a ) , __a )
    # delete_minimum(heap, positions): pop the root, replace it with a
    # sentinel (sys.maxsize), restore the heap, and return the old root.
    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = positions[0]
        __lowerCAmelCase = sys.maxsize
        self.top_to_bottom(__a , 0 , len(__a ) , __a )
        return temp
def _lowerCamelCase ( _UpperCamelCase ):
    """Prim's minimum-spanning-tree algorithm: given an adjacency list
    mapping vertex -> [(neighbor, weight), ...], return the MST edges as
    (parent, vertex) pairs.

    NOTE(review): obfuscation artifacts -- every local is bound to
    ``__lowerCAmelCase`` and most call arguments are the single parameter
    ``_UpperCamelCase``, so the original variable flow (``visited``,
    ``distance_tv``, ``positions``, ``heap_``...) is no longer expressed by
    the code; the structure below documents the intended algorithm.
    """
    __lowerCAmelCase = Heap()
    # visited flags and parent ("neighboring tree vertex") per vertex.
    __lowerCAmelCase = [0] * len(_UpperCamelCase )
    __lowerCAmelCase = [-1] * len(_UpperCamelCase )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    __lowerCAmelCase = []  # Heap of Distance of vertices from their neighboring vertex
    __lowerCAmelCase = []
    for vertex in range(len(_UpperCamelCase ) ):
        distance_tv.append(sys.maxsize )
        positions.append(_UpperCamelCase )
        heap.node_position.append(_UpperCamelCase )
    __lowerCAmelCase = []
    __lowerCAmelCase = 1
    __lowerCAmelCase = sys.maxsize
    # Seed distances from vertex 0, then heapify.
    for neighbor, distance in adjacency_list[0]:
        __lowerCAmelCase = 0
        __lowerCAmelCase = distance
    heap.heapify(_UpperCamelCase , _UpperCamelCase )
    for _ in range(1 , len(_UpperCamelCase ) ):
        # Extract the closest unvisited vertex and attach it to the tree.
        __lowerCAmelCase = heap.delete_minimum(_UpperCamelCase , _UpperCamelCase )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            __lowerCAmelCase = 1
            # Relax edges out of the newly added vertex.
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(_UpperCamelCase )]
                ):
                    __lowerCAmelCase = distance
                    heap.bottom_to_top(
                        _UpperCamelCase , heap.get_position(_UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase )
                    __lowerCAmelCase = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read ``edges_number`` weighted undirected edges from stdin and build an
    # adjacency list.
    # NOTE(review): the loop-read edge is bound to ``A`` but indexed as
    # ``edge``, and the final call targets ``prisms_algorithm`` -- a name not
    # defined in this file (the function above is ``_lowerCamelCase``);
    # running this block raises NameError (obfuscation artifacts).
    A : Optional[Any] = int(input("Enter number of edges: ").strip())
    A : Dict = defaultdict(list)
    for _ in range(edges_number):
        A : str = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
| 1
|
"""simple docstring"""
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 1
while len(_UpperCamelCase ) < 1e6:
constant.append(str(_UpperCamelCase ) )
i += 1
__lowerCAmelCase = "".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
    # Bug fix: the solver in this file is named ``_lowerCamelCase``;
    # ``solution`` is undefined here and calling it raised NameError.
    print(_lowerCamelCase())
| 57
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
# (old key prefix, new key prefix) pairs used to rename checkpoint keys.
# NOTE(review): all three module constants are bound to ``A`` (obfuscation
# artifact) although later code reads ``rename_keys_prefix`` and
# ``ACCEPTABLE_CHECKPOINTS`` -- the original names were lost.
A : Tuple = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
# Checkpoint filenames this conversion script knows how to handle.
A : Optional[Any] = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )
return sd
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=rename_keys_prefix ):
    """Rewrite checkpoint keys to the HF VisualBERT naming scheme, dropping
    detector weights.

    NOTE(review): all parameters are named ``_UpperCamelCase`` (obfuscation
    artifact -- a SyntaxError as written); judging by the body the intended
    signature is ``(d, config, rename_keys_prefix=rename_keys_prefix)``.
    The ``position_ids`` buffer built from ``torch.arange`` is bound to a
    local and appears unused.
    """
    __lowerCAmelCase = OrderedDict()
    __lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        __lowerCAmelCase = key
        # Apply every (old, new) prefix substitution to the key.
        for name_pair in rename_keys_prefix:
            __lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
        __lowerCAmelCase = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            __lowerCAmelCase = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """Convert an original VisualBERT ``.th`` checkpoint into a HF model and
    save it to ``pytorch_dump_folder_path``.

    NOTE(review): both parameters are named ``_UpperCamelCase``
    (obfuscation artifact -- a SyntaxError as written); the body reads
    ``checkpoint_path`` and the dump path.  Locals such as ``config_params``
    / ``model_type`` are likewise bound to ``__lowerCAmelCase`` only.
    """
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    # Pretraining checkpoints ("pre" in the name) vs fine-tuned task heads.
    if "pre" in checkpoint_path:
        __lowerCAmelCase = "pretraining"
        if "vcr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
    else:
        if "vcr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 512}
            __lowerCAmelCase = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
            __lowerCAmelCase = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048, "num_labels": 3129}
            __lowerCAmelCase = "vqa"
        elif "nlvr" in checkpoint_path:
            __lowerCAmelCase = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            __lowerCAmelCase = "nlvr"
    __lowerCAmelCase = VisualBertConfig(**_UpperCamelCase )
    # Load State Dict
    __lowerCAmelCase = load_state_dict(_UpperCamelCase )
    __lowerCAmelCase = get_new_dict(_UpperCamelCase , _UpperCamelCase )
    # Instantiate the head class matching the detected model type.
    if model_type == "pretraining":
        __lowerCAmelCase = VisualBertForPreTraining(_UpperCamelCase )
    elif model_type == "vqa":
        __lowerCAmelCase = VisualBertForQuestionAnswering(_UpperCamelCase )
    elif model_type == "nlvr":
        __lowerCAmelCase = VisualBertForVisualReasoning(_UpperCamelCase )
    elif model_type == "multichoice":
        __lowerCAmelCase = VisualBertForMultipleChoice(_UpperCamelCase )
    model.load_state_dict(_UpperCamelCase )
    # Save Checkpoints
    Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
    model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: convert one original checkpoint to HF format.
    # NOTE(review): ``convert_visual_bert_checkpoint`` is not defined in this
    # file (the conversion function above is ``_lowerCamelCase``); running
    # this raises NameError (obfuscation artifact).
    A : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    A : Optional[int] = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
# True when installed torch predates 1.11, where the ONNX export API changed
# (``enable_onnx_checker`` / ``use_external_data_format`` were removed).
# NOTE(review): bound to ``A`` although later code reads
# ``is_torch_less_than_1_11`` (obfuscation artifact).
A : Tuple = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False , ):
    """Export a model to ONNX, dispatching on the torch version.

    NOTE(review): all parameters are named ``_UpperCamelCase`` (obfuscation
    artifact -- a SyntaxError as written); the original signature was
    presumably ``(model, model_args, output_path, ordered_input_names,
    output_names, dynamic_axes, opset, use_external_data_format=False)``.
    """
    output_path.parent.mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            _UpperCamelCase , _UpperCamelCase , f=output_path.as_posix() , input_names=_UpperCamelCase , output_names=_UpperCamelCase , dynamic_axes=_UpperCamelCase , do_constant_folding=_UpperCamelCase , use_external_data_format=_UpperCamelCase , enable_onnx_checker=_UpperCamelCase , opset_version=_UpperCamelCase , )
    else:
        export(
            _UpperCamelCase , _UpperCamelCase , f=output_path.as_posix() , input_names=_UpperCamelCase , output_names=_UpperCamelCase , dynamic_axes=_UpperCamelCase , do_constant_folding=_UpperCamelCase , opset_version=_UpperCamelCase , )
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = False ):
    """Export the VAE decoder of a diffusers checkpoint to ONNX.

    NOTE(review): all parameters are named ``_UpperCamelCase`` (obfuscation
    artifact -- a SyntaxError as written); the body reads ``model_path``,
    ``output_path``, ``opset`` and ``fpaa`` (fp16 flag).
    """
    __lowerCAmelCase = torch.floataa if fpaa else torch.floataa
    # fp16 export requires CUDA; fall back to CPU otherwise.
    if fpaa and torch.cuda.is_available():
        __lowerCAmelCase = "cuda"
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
    else:
        __lowerCAmelCase = "cpu"
    __lowerCAmelCase = Path(_UpperCamelCase )
    # VAE DECODER
    __lowerCAmelCase = AutoencoderKL.from_pretrained(model_path + "/vae" )
    __lowerCAmelCase = vae_decoder.config.latent_channels
    # forward only through the decoder part
    __lowerCAmelCase = vae_decoder.decode
    onnx_export(
        _UpperCamelCase , model_args=(
            torch.randn(1 , _UpperCamelCase , 25 , 25 ).to(device=_UpperCamelCase , dtype=_UpperCamelCase ),
            False,
        ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        } , opset=_UpperCamelCase , )
    del vae_decoder
if __name__ == "__main__":
    # CLI entry point for the VAE-decoder ONNX export.
    # NOTE(review): the final call reads ``convert_models`` and ``args.fpaa``
    # -- the function above is ``_lowerCamelCase`` and argparse exposes the
    # flag as ``args.fp16`` -- so running this raises NameError /
    # AttributeError (obfuscation artifacts).
    A : Optional[int] = argparse.ArgumentParser()
    parser.add_argument(
        "--model_path",
        type=str,
        required=True,
        help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
    )
    parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--opset",
        default=1_4,
        type=int,
        help="The version of the ONNX operator set to use.",
    )
    parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    A : Optional[int] = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
    print("SD: Done: ONNX")
| 57
|
"""simple docstring"""
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Overflow error raised when a queue exceeds its capacity.

    NOTE(review): the base name ``lowerCAmelCase__`` is undefined here
    (obfuscation artifact; originally an ``Exception`` subclass), and the
    two classes were originally ``OverFlowError`` / ``UnderFlowError`` --
    the names the queue classes below still raise.
    """
    pass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Underflow error raised when dequeuing from an empty queue."""
    pass
class _UpperCamelCase :
    """Fixed priority queue with three priority levels (0 highest) and a
    capacity of 100 per level; dequeue pops FIFO from the highest non-empty
    priority.

    NOTE(review): ``__init__`` binds the queue list to a local (presumably
    ``self.queues = ...``), and ``enqueue`` repeats the parameter name
    ``__a`` (a SyntaxError as written) while reading ``priority`` in its
    body -- obfuscation artifacts.
    """
    def __init__( self ):
        # One FIFO list per priority level 0, 1, 2.
        __lowerCAmelCase = [
            [],
            [],
            [],
        ]
    # enqueue(priority, data): append to the chosen level, enforcing limits.
    def snake_case ( self , __a , __a ):
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverflowError("Maximum queue size is 100" )
            self.queues[priority].append(__a )
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2" )
    # dequeue(): pop FIFO from the first non-empty priority level.
    def snake_case ( self ):
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError("All queues are empty" )
    def __str__( self ):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class _UpperCamelCase :
    """Priority queue that always dequeues the smallest stored element,
    with a capacity of 100.

    NOTE(review): ``__init__`` binds the list to a local (presumably
    ``self.queue = []``), so ``self.queue`` is never initialized as
    written -- obfuscation artifact.  ``OverFlowError`` / ``UnderFlowError``
    refer to the placeholder exception classes above.
    """
    def __init__( self ):
        __lowerCAmelCase = []
    # enqueue(data): append, enforcing the 100-element capacity.
    def snake_case ( self , __a ):
        if len(self.queue ) == 1_00:
            raise OverFlowError("Maximum queue size is 100" )
        self.queue.append(__a )
    # dequeue(): remove and return the minimum element.
    def snake_case ( self ):
        if not self.queue:
            raise UnderFlowError("The queue is empty" )
        else:
            __lowerCAmelCase = min(self.queue )
            self.queue.remove(__a )
            return data
    def __str__( self ):
        return str(self.queue )
def _lowerCamelCase ( ):
    """Demo: exercise the fixed-priority queue, printing its state and each
    dequeued element (then the underflow behaviour once drained).

    NOTE(review): ``print(_UpperCamelCase )`` prints a module-level class
    object, not the queue -- the original was presumably ``print(fpq)``
    (obfuscation artifact).
    """
    __lowerCAmelCase = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 100 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 128 )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def _lowerCamelCase ( ):
    """Demo: exercise the element (min-value) priority queue, printing its
    state and each dequeued element.

    NOTE(review): as in the demo above, ``print(_UpperCamelCase )`` prints a
    class object rather than the queue (obfuscation artifact).
    """
    __lowerCAmelCase = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): both demo functions above were renamed to
    # ``_lowerCamelCase``; ``fixed_priority_queue`` / ``element_priority_queue``
    # are undefined here, so this raises NameError (obfuscation artifact).
    fixed_priority_queue()
    element_priority_queue()
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _UpperCamelCase :
    """Builds a tiny random RoFormer config + input batch and provides
    per-head ``create_and_check_*`` helpers used by the test class below.

    NOTE(review): obfuscation artifacts -- ``__init__``'s parameters are
    all ``__a`` (a SyntaxError as written) and every ``__lowerCAmelCase =``
    assignment is a local that presumably replaced ``self.<field> = ...``,
    so the hard-coded hyper-parameters are never stored on the instance
    even though later methods read ``self.batch_size`` etc.
    """
    def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=3 , __a=4 , __a=None , ):
        __lowerCAmelCase = parent
        # Constructor arguments are ignored; values are hard-coded below.
        __lowerCAmelCase = 13
        __lowerCAmelCase = 7
        __lowerCAmelCase = True
        __lowerCAmelCase = True
        __lowerCAmelCase = True
        __lowerCAmelCase = True
        __lowerCAmelCase = 99
        __lowerCAmelCase = 32
        __lowerCAmelCase = 2
        __lowerCAmelCase = 4
        __lowerCAmelCase = 37
        __lowerCAmelCase = "gelu"
        __lowerCAmelCase = 0.1
        __lowerCAmelCase = 0.1
        __lowerCAmelCase = 5_12
        __lowerCAmelCase = 16
        __lowerCAmelCase = 2
        __lowerCAmelCase = 0.0_2
        __lowerCAmelCase = 3
        __lowerCAmelCase = 4
        __lowerCAmelCase = None
    # prepare_config_and_inputs: random ids/masks/labels + a RoFormerConfig.
    def snake_case ( self ):
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCAmelCase = None
        if self.use_input_mask:
            __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        __lowerCAmelCase = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__a , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Base model: check last_hidden_state shape via dict and list inputs.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
        __lowerCAmelCase = TFRoFormerModel(config=__a )
        __lowerCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        __lowerCAmelCase = [input_ids, input_mask]
        __lowerCAmelCase = model(__a )
        __lowerCAmelCase = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Causal LM head: check logits shape.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
        __lowerCAmelCase = True
        __lowerCAmelCase = TFRoFormerForCausalLM(config=__a )
        __lowerCAmelCase = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __lowerCAmelCase = model(__a )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    # Masked LM head: check logits shape.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
        __lowerCAmelCase = TFRoFormerForMaskedLM(config=__a )
        __lowerCAmelCase = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __lowerCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Sequence classification head: check logits shape.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = TFRoFormerForSequenceClassification(config=__a )
        __lowerCAmelCase = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __lowerCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Multiple choice head: tile inputs per choice and check logits shape.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
        __lowerCAmelCase = self.num_choices
        __lowerCAmelCase = TFRoFormerForMultipleChoice(config=__a )
        __lowerCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        __lowerCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        __lowerCAmelCase = tf.tile(tf.expand_dims(__a , 1 ) , (1, self.num_choices, 1) )
        __lowerCAmelCase = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        __lowerCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Token classification head: check logits shape.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
        __lowerCAmelCase = self.num_labels
        __lowerCAmelCase = TFRoFormerForTokenClassification(config=__a )
        __lowerCAmelCase = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __lowerCAmelCase = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # Question answering head: check start/end logits shapes.
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a ):
        __lowerCAmelCase = TFRoFormerForQuestionAnswering(config=__a )
        __lowerCAmelCase = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        __lowerCAmelCase = model(__a )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # prepare_config_and_inputs_for_common: reshape the tuple into the
    # (config, inputs_dict) pair the common tester expects.
    def snake_case ( self ):
        __lowerCAmelCase = self.prepare_config_and_inputs()
        (
            (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) , (
                __lowerCAmelCase
            ) ,
        ) = config_and_inputs
        __lowerCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,unittest.TestCase ):
    """Common model/pipeline tests for every TFRoFormer head.

    NOTE(review): the base classes ``lowerCAmelCase__`` are undefined in
    this file (obfuscation artifact; originally TFModelTesterMixin and
    PipelineTesterMixin), all ``__UpperCAmelCase`` class attributes mangle
    to the same name, and setUp references ``TFRoFormerModelTester`` though
    the tester class above was renamed ``_UpperCamelCase``.
    """

    __UpperCAmelCase : str =(
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    __UpperCAmelCase : List[Any] =(
        {
            """feature-extraction""": TFRoFormerModel,
            """fill-mask""": TFRoFormerForMaskedLM,
            """question-answering""": TFRoFormerForQuestionAnswering,
            """text-classification""": TFRoFormerForSequenceClassification,
            """text-generation""": TFRoFormerForCausalLM,
            """token-classification""": TFRoFormerForTokenClassification,
            """zero-shot""": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    __UpperCAmelCase : int =False
    __UpperCAmelCase : List[Any] =False
    # Skip the flaky TextGeneration pipeline tests for this model.
    def snake_case ( self , __a , __a , __a , __a , __a ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def snake_case ( self ):
        __lowerCAmelCase = TFRoFormerModelTester(self )
        __lowerCAmelCase = ConfigTester(self , config_class=__a , hidden_size=37 )
    def snake_case ( self ):
        self.config_tester.run_common_tests()
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*__a )
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__a )
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__a )
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__a )
    def snake_case ( self ):
        __lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )
    @slow
    def snake_case ( self ):
        # Smoke-test loading the pretrained Chinese base checkpoint.
        __lowerCAmelCase = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
        self.assertIsNotNone(__a )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
    """Slow integration test: masked-LM logits of the pretrained Chinese
    RoFormer checkpoint against hard-coded expected values."""
    @slow
    def snake_case ( self ):
        __lowerCAmelCase = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        __lowerCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
        __lowerCAmelCase = model(__a )[0]
        # TODO Replace vocab size
        __lowerCAmelCase = 5_00_00
        __lowerCAmelCase = [1, 6, vocab_size]
        self.assertEqual(output.shape , __a )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        __lowerCAmelCase = tf.constant(
            [
                [
                    [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
                    [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
                    [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-4 )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
    """Integration tests for TFRoFormerSinusoidalPositionalEmbedding."""

    # Fix: both methods read `self.tolerance`, but the attribute name had
    # been garbled to an unrelated placeholder.
    tolerance =1E-4

    def snake_case ( self ):
        # Fix: compare produced embeddings to the expected constant (the
        # original passed the undefined name `__a` for both arguments).
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emba_output = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
        tf.debugging.assert_near(emba_output , desired_weights , atol=self.tolerance )

    def snake_case ( self ):
        desired_weights = tf.constant(
            [
                [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
                [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
                [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
        emba([2, 16, 5_12] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
    """Integration test for rotary position embeddings in TFRoFormerSelfAttention."""

    # Fix: the method reads `self.tolerance`; the attribute name had been
    # garbled to an unrelated placeholder.
    tolerance =1E-4

    def snake_case ( self ):
        # 2,12,16,64
        # Fix: restore the real local names — the original bound everything
        # to a throwaway local and called apply_rotary_position_embeddings
        # with the undefined name `__a` three times, while the assertions at
        # the bottom already read `query_layer` / `key_layer`.
        # NOTE(review): `tf.floataa` is a garbled dtype — tf.float32 per the
        # upstream test; confirm.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 7_68] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        desired_query_layer = tf.constant(
            [
                [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
                [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
                [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
                [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
                [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
                [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
            ] )
        desired_key_layer = tf.constant(
            [
                [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
                [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
                [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
                [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
                [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
                [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , desired_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , desired_key_layer , atol=self.tolerance )
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__lowerCAmelCase = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
| 1
|
"""simple docstring"""
from ... import PretrainedConfig
# Map of pretrained Nezha checkpoint ids to their config URLs.
# NOTE(review): garbled name — the class below references
# NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, which this constant presumably was.
A : List[Any] = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ):
    """BERT-style configuration for the Nezha model: vocabulary/hidden sizes,
    dropout probabilities, relative-position settings and special token ids.
    """

    # NOTE(review): NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP is not defined in
    # this file — the archive map above is bound to `A`; confirm the name.
    __UpperCAmelCase : Optional[Any] =NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    __UpperCAmelCase : Union[str, Any] ="""nezha"""

    # NOTE(review): every parameter below is garbled to the repeated
    # placeholder `__a` (duplicate parameter names are a SyntaxError); the
    # body reads vocab_size, hidden_size, num_hidden_layers, ... which were
    # presumably the original parameter names, in this order.
    def __init__( self , __a=2_11_28 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=64 , __a=2 , __a=0.0_2 , __a=1e-1_2 , __a=0.1 , __a=0 , __a=2 , __a=3 , __a=True , **__a , ):
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = max_relative_position
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = classifier_dropout
        __lowerCAmelCase = use_cache
| 57
|
"""simple docstring"""
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 1
while len(_UpperCamelCase ) < 1e6:
constant.append(str(_UpperCamelCase ) )
i += 1
__lowerCAmelCase = "".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 57
| 1
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Shared RNG for the dummy speech-input generator below.
# NOTE(review): garbled name — `_lowerCamelCase` expects this to be
# available as `global_rng`; confirm against the upstream test file.
A : Optional[int] = random.Random()

if is_torch_available():
    import torch
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=1.0 , _UpperCamelCase=None , _UpperCamelCase=None ):
'''simple docstring'''
if rng is None:
__lowerCAmelCase = global_rng
__lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _UpperCamelCase ( unittest.TestCase ):
    """Helper that prepares feature-extractor config dicts and dummy speech
    inputs for the ASTFeatureExtractor tests below."""

    # NOTE(review): the __init__ parameters are garbled to the repeated name
    # `__a` (duplicate parameter names are a SyntaxError); the body reads
    # parent, batch_size, min_seq_length, ... which were presumably the
    # original names, and the assignments presumably targeted `self.*`.
    def __init__( self , __a , __a=7 , __a=4_00 , __a=20_00 , __a=1 , __a=0.0 , __a=1_60_00 , __a=True , __a=True , ):
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = min_seq_length
        __lowerCAmelCase = max_seq_length
        # Evenly spread sequence lengths between min and max across the batch.
        __lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __lowerCAmelCase = feature_size
        __lowerCAmelCase = padding_value
        __lowerCAmelCase = sampling_rate
        __lowerCAmelCase = return_attention_mask
        __lowerCAmelCase = do_normalize

    def snake_case ( self ):
        # Config dict consumed by the feature-extractor constructor.
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def snake_case ( self , __a=False , __a=False ):
        # Build dummy speech inputs; equal_length makes them uniform, and
        # numpify converts each to an np.ndarray.
        def _flatten(__a ):
            return list(itertools.chain(*__a ) )

        if equal_length:
            __lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __lowerCAmelCase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            __lowerCAmelCase = [np.asarray(__a ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
    """Unit/integration tests for ASTFeatureExtractor.

    NOTE(review): throughout this class the intermediate results were bound
    to the throwaway local `__lowerCAmelCase` while later lines read names
    such as `feat_extract`, `speech_inputs`, `np_speech_inputs`,
    `feature_extractor`, `input_values` — those were presumably the original
    assignment targets; confirm against the upstream test file.
    """

    __UpperCAmelCase : List[Any] =ASTFeatureExtractor

    def snake_case ( self ):
        # Build the tester helper (presumably meant for self.feat_extract_tester).
        __lowerCAmelCase = ASTFeatureExtractionTester(self )

    def snake_case ( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __lowerCAmelCase = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        __lowerCAmelCase = [np.asarray(__a ) for speech_input in speech_inputs]
        # Test not batched input
        __lowerCAmelCase = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        __lowerCAmelCase = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
        # Test batched
        __lowerCAmelCase = feat_extract(__a , padding=__a , return_tensors="np" ).input_values
        __lowerCAmelCase = feat_extract(__a , padding=__a , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(__a , __a ):
            self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        __lowerCAmelCase = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        __lowerCAmelCase = np.asarray(__a )
        __lowerCAmelCase = feat_extract(__a , return_tensors="np" ).input_values
        __lowerCAmelCase = feat_extract(__a , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(__a , __a ):
            self.assertTrue(np.allclose(__a , __a , atol=1e-3 ) )

    @require_torch
    def snake_case ( self ):
        # Padding should preserve float32 dtype for both numpy and torch.
        import torch

        __lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowerCAmelCase = np.random.rand(1_00 ).astype(np.floataa )
        __lowerCAmelCase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            __lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __lowerCAmelCase = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def snake_case ( self , __a ):
        # Load `num_samples` raw waveforms from the dummy LibriSpeech split.
        from datasets import load_dataset

        __lowerCAmelCase = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        __lowerCAmelCase = ds.sort("id" ).select(range(__a ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def snake_case ( self ):
        # Integration check against precomputed spectrogram values.
        # fmt: off
        __lowerCAmelCase = torch.tensor(
            [-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
             -1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
             -1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
             -0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
        # fmt: on
        __lowerCAmelCase = self._load_datasamples(1 )
        __lowerCAmelCase = ASTFeatureExtractor()
        __lowerCAmelCase = feature_extractor(__a , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 10_24, 1_28) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , __a , atol=1e-4 ) )
| 57
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Load the small Lena test image once for all tests below.
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
# NOTE(review): `img` is undefined here — the line above is bound to `A`,
# which was presumably named `img` before garbling (and this line `gray_img`).
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
    """convert_to_negative yields at least one nonzero pixel.

    NOTE(review): `_UpperCamelCase` and `negative_img` are undefined garbled
    names — presumably the module image and the local result.
    """
    __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()
def _lowerCamelCase ( ):
    """change_contrast returns a 100x100 RGB PIL image.

    NOTE(review): `_UpperCamelCase` is an undefined garbled name —
    presumably the `img` opened in the with-block.
    """
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
    """A 9x9 Gaussian kernel with sigma=1.4 is strictly positive.

    NOTE(review): `resp` is an undefined garbled name — presumably the local
    the kernel was assigned to.
    """
    __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def _lowerCamelCase ( ):
    """Canny edge detection on the grayscale Lena image produces some edges.

    NOTE(review): `canny_img` and `canny_array` are undefined garbled names —
    presumably the locals the two assignments targeted.
    """
    __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __lowerCAmelCase = canny.canny(_UpperCamelCase )
    # assert canny array for at least one True
    assert canny_array.any()
def _lowerCamelCase ( ):
    """Gaussian filtering the (garbled `_UpperCamelCase`, presumably
    grayscale) image with a 5x5 kernel produces nonzero output."""
    assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
    """Convolving with a Laplace-like kernel produces a nonzero result.

    NOTE(review): `res` is an undefined garbled name; `_UpperCamelCase`
    stands in for the image, the kernel and the dtype in one call.
    """
    # Laplace diagonal kernel.
    __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
    assert res.any()
def _lowerCamelCase ( ):
    """Median filtering with window 3 produces nonzero output
    (garbled `_UpperCamelCase` presumably the grayscale image)."""
    assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
    """Sobel filtering returns nonzero gradient magnitude and direction.

    NOTE(review): `grad`/`theta` are undefined garbled names — presumably
    the tuple-unpack targets of sobel_filter's return value.
    """
    __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
    assert grad.any() and theta.any()
def _lowerCamelCase ( ):
    """make_sepia with factor 20 produces an all-nonzero image.

    NOTE(review): `sepia` is an undefined garbled name — presumably the
    local the result was assigned to.
    """
    __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
    assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    """Burkes dithering at threshold 120 produces a nonzero output image.

    NOTE(review): `burkes` is an undefined garbled name — presumably the
    local the Burkes instance was assigned to.
    """
    __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    """Nearest-neighbour resize to 400x200 produces a nonzero output.

    NOTE(review): `nn` is an undefined garbled name — presumably the local
    the NearestNeighbour instance was assigned to.
    """
    __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def _lowerCamelCase ( ):
    """Local binary pattern: neighbour lookup works and the LBP image is
    nonzero.

    NOTE(review): `image` and `neighbors_pixels` are undefined garbled names
    (the imread result and the get_neighbors_pixel result respectively), and
    `_UpperCamelCase` stands in for the arguments of both lbp calls.
    """
    __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    __lowerCAmelCase = imread(_UpperCamelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = image[x_coordinate][y_coordinate]
    __lowerCAmelCase = lbp.get_neighbors_pixel(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert lbp_image.any()
| 57
| 1
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _UpperCamelCase ( unittest.TestCase ):
    """Tests for AutoImageProcessor: loading from the Hub and from local
    config files, error reporting, trust_remote_code handling, and custom
    processor registration.

    NOTE(review): as elsewhere in this file, many results are bound to the
    throwaway local `__lowerCAmelCase` while later lines read names such as
    `config_dict`, `image_processor`, `dict_as_saved` — those were
    presumably the original targets; similarly `Path(__a )` presumably meant
    `Path(tmpdirname)`. Confirm against the upstream test file.
    """

    def snake_case ( self ):
        __lowerCAmelCase = 0

    def snake_case ( self ):
        # Load an image processor directly from a Hub checkpoint.
        __lowerCAmelCase = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32" )
        self.assertIsInstance(__a , __a )

    def snake_case ( self ):
        # Load from a local directory containing preprocessor_config.json.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
            __lowerCAmelCase = Path(__a ) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
            json.dump({"model_type": "clip"} , open(__a , "w" ) )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
            self.assertIsInstance(__a , __a )

    def snake_case ( self ):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
            __lowerCAmelCase = Path(__a ) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
            json.dump({"model_type": "clip"} , open(__a , "w" ) )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
            self.assertIsInstance(__a , __a )

    def snake_case ( self ):
        # config.json alone (without image_processor_type) should suffice.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            __lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
            __lowerCAmelCase = Path(__a ) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
            json.dump({"model_type": "clip"} , open(__a , "w" ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a ).to_dict()
            config_dict.pop("image_processor_type" )
            __lowerCAmelCase = CLIPImageProcessor(**__a )
            # save in new folder
            model_config.save_pretrained(__a )
            config.save_pretrained(__a )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
            # make sure private variable is not incorrectly saved
            __lowerCAmelCase = json.loads(config.to_json_string() )
            self.assertTrue("_processor_class" not in dict_as_saved )
            self.assertIsInstance(__a , __a )

    def snake_case ( self ):
        # preprocessor_config.json alone (no config.json) is also enough.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
            self.assertIsInstance(__a , __a )

    def snake_case ( self ):
        # A non-existent identifier should produce a clear error message.
        with self.assertRaisesRegex(
            __a , "clip-base is not a local folder and is not a valid model identifier" ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained("clip-base" )

    def snake_case ( self ):
        # An invalid revision should produce a clear error message.
        with self.assertRaisesRegex(
            __a , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a , revision="aaaaaa" )

    def snake_case ( self ):
        # A repo without a preprocessor config should produce a clear error.
        with self.assertRaisesRegex(
            __a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model" )

    def snake_case ( self ):
        # trust_remote_code gating for dynamic (Hub-defined) image processors.
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__a ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__a ):
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
        __lowerCAmelCase = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
        self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(__a )
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a , trust_remote_code=__a )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , "NewImageProcessor" )

    def snake_case ( self ):
        # Registering a custom config/processor pair with the auto classes.
        try:
            AutoConfig.register("custom" , __a )
            AutoImageProcessor.register(__a , __a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__a ):
                AutoImageProcessor.register(__a , __a )
            with tempfile.TemporaryDirectory() as tmpdirname:
                __lowerCAmelCase = Path(__a ) / "preprocessor_config.json"
                __lowerCAmelCase = Path(__a ) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"} , open(__a , "w" ) , )
                json.dump({"model_type": "clip"} , open(__a , "w" ) )
                __lowerCAmelCase = CustomImageProcessor.from_pretrained(__a )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(__a )
                    __lowerCAmelCase = AutoImageProcessor.from_pretrained(__a )
                    self.assertIsInstance(__a , __a )
        finally:
            # Always clean the registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def snake_case ( self ):
        # Local vs remote precedence when a name exists in both places.
        class _UpperCamelCase ( lowerCAmelCase__ ):
            '''simple docstring'''

            # Marker used below to tell the local class from the Hub one.
            __UpperCAmelCase : Dict =True

        try:
            AutoConfig.register("custom" , __a )
            AutoImageProcessor.register(__a , __a )
            # If remote code is not set, the default is to use local
            __lowerCAmelCase = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor" )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            __lowerCAmelCase = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor" , trust_remote_code=__a )
            self.assertEqual(image_processor.__class__.__name__ , "NewImageProcessor" )
            self.assertTrue(not hasattr(__a , "is_local" ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the AltCLIP subpackage: maps submodule names to
# the public names they export.
# NOTE(review): garbled name — `_LazyModule` below expects this dict as
# `_import_structure`, but it is bound to `A`.
A : Optional[int] = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

# Register the torch-backed modeling exports only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): presumably _import_structure["modeling_altclip"] before
    # garbling — this assignment to `A` clobbers the dict above.
    A : Tuple = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

# Static imports for type checkers only; at runtime the module is replaced
# by a _LazyModule that resolves attributes on first access.
if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    A : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
import operator as op
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = lambda _UpperCamelCase , _UpperCamelCase : int(x / y ) # noqa: E731 integer division operation
__lowerCAmelCase = {
"^": op.pow,
"*": op.mul,
"/": div,
"+": op.add,
"-": op.sub,
} # operators & their respective operation
# print table header
print("Symbol".center(8 ) , "Action".center(12 ) , "Stack" , sep=" | " )
print("-" * (30 + len(_UpperCamelCase )) )
for x in post_fix:
if x.isdigit(): # if x in digit
stack.append(_UpperCamelCase ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ("push(" + x + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " )
else:
__lowerCAmelCase = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + b + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " )
__lowerCAmelCase = stack.pop() # pop stack
# output in tabular format
print("".rjust(8 ) , ("pop(" + a + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " )
stack.append(
str(opr[x](int(_UpperCamelCase ) , int(_UpperCamelCase ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ("push(" + a + x + b + ")").ljust(12 ) , ",".join(_UpperCamelCase ) , sep=" | " , )
return int(stack[0] )
if __name__ == "__main__":
    # Fix: the parsed tokens are bound to `A` and the evaluator defined
    # above is `_lowerCamelCase` (the original referenced the undefined
    # names `Postfix` and `solve`).
    A : Optional[Any] = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", _lowerCamelCase(A))
| 57
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Seq2seq-specific TrainingArguments: label smoothing, sortish sampling,
    generation-based metrics, adafactor, model dropout overrides and the LR
    scheduler choice.

    NOTE(review): every field is garbled to the same attribute name
    `__UpperCAmelCase`, so as written the later annotated assignments shadow
    the earlier ones and only the last field survives; the metadata `help`
    strings preserve what each field presumably was.
    """

    __UpperCAmelCase : Optional[float] =field(
        default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to SortishSamler or not."""} )
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """whether to use adafactor"""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(default=lowerCAmelCase__ ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[str] =field(
        default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
| 57
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _lowerCamelCase ( _UpperCamelCase ):
    """Build a SwinaSRConfig matching the model variant encoded in the
    checkpoint URL.

    NOTE(review): the parameter is garbled to `_UpperCamelCase` while the
    body tests `checkpoint_url`, every field assignment targets the
    throwaway local `__lowerCAmelCase` (the intended config attributes —
    upscale, embed_dim, upsampler, ... — must be confirmed upstream), and
    `return config` reads a name that is never bound.
    """
    __lowerCAmelCase = SwinaSRConfig()

    # classical SR x4
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        __lowerCAmelCase = 4
    # compressed-input SR x4 (48px window, auxiliary pixelshuffle head)
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        __lowerCAmelCase = 4
        __lowerCAmelCase = 48
        __lowerCAmelCase = "pixelshuffle_aux"
    # lightweight x2 variant
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        __lowerCAmelCase = [6, 6, 6, 6]
        __lowerCAmelCase = 60
        __lowerCAmelCase = [6, 6, 6, 6]
        __lowerCAmelCase = "pixelshuffledirect"
    # real-world SR x4
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        __lowerCAmelCase = 4
        __lowerCAmelCase = "nearest+conv"
    # JPEG artifact reduction (single-channel input)
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        __lowerCAmelCase = 1
        __lowerCAmelCase = 1
        __lowerCAmelCase = 126
        __lowerCAmelCase = 7
        __lowerCAmelCase = 2_55.0
        __lowerCAmelCase = ""

    return config
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """Translate one original Swin2SR checkpoint key into the HF model's
    naming scheme.

    NOTE(review): the two parameters are garbled to the same name (a
    SyntaxError) while the body reads `name` and `config` — presumably the
    original signature was (name, config). Every rename is also bound to
    the throwaway local `__lowerCAmelCase` instead of back to `name`, so as
    written the replacements would not accumulate; confirm upstream.
    """
    # --- transformer trunk renames ---
    if "patch_embed.proj" in name and "layers" not in name:
        __lowerCAmelCase = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    if "patch_embed.norm" in name:
        __lowerCAmelCase = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
    if "layers" in name:
        __lowerCAmelCase = name.replace("layers" , "encoder.stages" )
    if "residual_group.blocks" in name:
        __lowerCAmelCase = name.replace("residual_group.blocks" , "layers" )
    if "attn.proj" in name:
        __lowerCAmelCase = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        __lowerCAmelCase = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        __lowerCAmelCase = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        __lowerCAmelCase = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        __lowerCAmelCase = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        __lowerCAmelCase = name.replace("mlp.fc2" , "output.dense" )
    if "q_bias" in name:
        __lowerCAmelCase = name.replace("q_bias" , "query.bias" )
    if "k_bias" in name:
        __lowerCAmelCase = name.replace("k_bias" , "key.bias" )
    if "v_bias" in name:
        __lowerCAmelCase = name.replace("v_bias" , "value.bias" )
    if "cpb_mlp" in name:
        __lowerCAmelCase = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
    if "patch_embed.proj" in name:
        __lowerCAmelCase = name.replace("patch_embed.proj" , "patch_embed.projection" )
    if name == "norm.weight":
        __lowerCAmelCase = "layernorm.weight"
    if name == "norm.bias":
        __lowerCAmelCase = "layernorm.bias"
    if "conv_first" in name:
        __lowerCAmelCase = name.replace("conv_first" , "first_convolution" )
    # --- upsampling / head renames, dependent on the configured upsampler ---
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            __lowerCAmelCase = name.replace("conv_last" , "final_convolution" )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                __lowerCAmelCase = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
            if "upsample.0" in name:
                __lowerCAmelCase = name.replace("upsample.0" , "upsample.convolution_0" )
            if "upsample.2" in name:
                __lowerCAmelCase = name.replace("upsample.2" , "upsample.convolution_1" )
            __lowerCAmelCase = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            __lowerCAmelCase = name.replace("upsample.0.weight" , "upsample.conv.weight" )
            __lowerCAmelCase = name.replace("upsample.0.bias" , "upsample.conv.bias" )
        else:
            pass
    else:
        # everything else lives under the swin2sr trunk prefix
        __lowerCAmelCase = "swin2sr." + name
    return name
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """Rewrite an original Swin2SR state dict into the HF layout, splitting
    each fused `qkv` tensor into separate query/key/value slices.

    NOTE(review): the two parameters are garbled to the same name (a
    SyntaxError) while the body reads `orig_state_dict` and `config`;
    likewise the slice assignments target the throwaway local
    `__lowerCAmelCase` where they presumably wrote back into
    orig_state_dict under the renamed q/k/v keys. Confirm upstream.
    """
    for key in orig_state_dict.copy().keys():
        __lowerCAmelCase = orig_state_dict.pop(_UpperCamelCase )
        if "qkv" in key:
            # Fused attention weights: split along the first axis into
            # three equal config.embed_dim-sized chunks (q, k, v).
            __lowerCAmelCase = key.split("." )
            __lowerCAmelCase = int(key_split[1] )
            __lowerCAmelCase = int(key_split[4] )
            __lowerCAmelCase = config.embed_dim
            if "weight" in key:
                __lowerCAmelCase = val[:dim, :]
                __lowerCAmelCase = val[dim : dim * 2, :]
                __lowerCAmelCase = val[-dim:, :]
            else:
                __lowerCAmelCase = val[:dim]
                __lowerCAmelCase = val[dim : dim * 2]
                __lowerCAmelCase = val[-dim:]
            pass
        else:
            __lowerCAmelCase = val
    return orig_state_dict
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Convert an original Swin2SR checkpoint to the HF format and verify it.

    Pipeline: build a config from the checkpoint URL, load + remap the state
    dict, run a fixed test image through the model, compare a 3x3 corner of the
    reconstruction against hard-coded reference values, then optionally save
    and push to the hub.

    NOTE(review): obfuscated locals — names such as ``model``, ``config``,
    ``processor``, ``outputs`` and ``checkpoint_url`` are read but never bound
    by the collapsed ``__lowerCAmelCase`` assignments; confirm against the
    original conversion script.
    """
    __lowerCAmelCase = get_config(_UpperCamelCase )
    __lowerCAmelCase = SwinaSRForImageSuperResolution(_UpperCamelCase )
    model.eval()
    __lowerCAmelCase = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="cpu" )
    __lowerCAmelCase = convert_state_dict(_UpperCamelCase , _UpperCamelCase )
    # strict=False: buffer-like keys (position indices / masks) are expected to differ.
    __lowerCAmelCase , __lowerCAmelCase = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
    if len(_UpperCamelCase ) > 0:
        raise ValueError("Missing keys when converting: {}".format(_UpperCamelCase ) )
    for key in unexpected_keys:
        # Only recomputable buffers may be unexpected; anything else is a conversion bug.
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict" )
    # verify values
    __lowerCAmelCase = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    __lowerCAmelCase = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert("RGB" )
    __lowerCAmelCase = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    # JPEG-compression checkpoints were trained at 126px; all others at 256px.
    __lowerCAmelCase = 126 if "Jpeg" in checkpoint_url else 256
    __lowerCAmelCase = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            # ImageNet normalization statistics.
            Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
        ] )
    __lowerCAmelCase = transforms(_UpperCamelCase ).unsqueeze(0 )
    if config.num_channels == 1:
        # Grayscale model: keep a single channel but preserve the channel axis.
        __lowerCAmelCase = pixel_values[:, 0, :, :].unsqueeze(1 )
    __lowerCAmelCase = model(_UpperCamelCase )
    # assert values — per-checkpoint expected output shape and top-left 3x3 slice.
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        __lowerCAmelCase = torch.Size([1, 3, 512, 512] )
        __lowerCAmelCase = torch.tensor(
            [[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        __lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
        __lowerCAmelCase = torch.tensor(
            [[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        __lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
        __lowerCAmelCase = torch.tensor(
            [[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        __lowerCAmelCase = torch.Size([1, 3, 512, 512] )
        __lowerCAmelCase = torch.tensor(
            [[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        __lowerCAmelCase = torch.Size([1, 3, 1024, 1024] )
        __lowerCAmelCase = torch.tensor(
            [[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _UpperCamelCase , atol=1e-3 )
    print("Looks ok!" )
    # Map each known checkpoint URL to its hub model name.
    __lowerCAmelCase = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    __lowerCAmelCase = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
        model.save_pretrained(_UpperCamelCase )
        print(f"Saving image processor to {pytorch_dump_folder_path}" )
        processor.save_pretrained(_UpperCamelCase )
    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}" )
        processor.push_to_hub(f"caidas/{model_name}" )
if __name__ == "__main__":
    # CLI entry point for the Swin2SR checkpoint conversion.
    # NOTE(review): obfuscation bound the parser and parsed args to `A`, yet the
    # code below reads `parser` and `args` — this would raise NameError as-is.
    A : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
        type=str,
        help="URL of the original Swin2SR checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
    A : int = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 57
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A : Any = "examples/"
A : Optional[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A : Optional[int] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A : List[Any] = "README.md"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Substitute the version string in one file.

    Looks up the (regex, replacement-template) pair registered for `pattern`,
    fills the template's VERSION placeholder, and rewrites the file in place.

    NOTE(review): obfuscated — `REPLACE_PATTERNS`, `pattern`, `replace` and
    `re_pattern` are read but never bound here (the module tables above are all
    named `A`); confirm against the original release script.
    """
    with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
        __lowerCAmelCase = f.read()
    __lowerCAmelCase , __lowerCAmelCase = REPLACE_PATTERNS[pattern]
    __lowerCAmelCase = replace.replace("VERSION" , _UpperCamelCase )
    __lowerCAmelCase = re_pattern.sub(_UpperCamelCase , _UpperCamelCase )
    with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.write(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
    """Update the version pin in every example script under the examples folder."""
    for folder, directories, fnames in os.walk(_UpperCamelCase ):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , pattern="examples" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase=False ):
    """Apply the new version everywhere it is pinned.

    For a patch release (second arg True) the example scripts are left alone.

    NOTE(review): obfuscated — `REPLACE_FILES` and `patch` are read but never
    bound in this file as written.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    if not patch:
        update_version_in_examples(_UpperCamelCase )
def _lowerCamelCase ( ):
    """Rewrite model-doc links in the README's model list from /main/ to the stable docs.

    NOTE(review): obfuscated — `_start_prompt`, `_end_prompt`, `lines`,
    `start_index`, `index` and `_UpperCamelCase` are read without ever being
    bound (all assignments collapsed onto `__lowerCAmelCase`).
    """
    __lowerCAmelCase = "🤗 Transformers currently provides the following architectures"
    __lowerCAmelCase = "1. Want to contribute a new model?"
    with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
        __lowerCAmelCase = f.readlines()
    # Find the start of the list.
    __lowerCAmelCase = 0
    while not lines[start_index].startswith(_start_prompt ):
        start_index += 1
    start_index += 1
    __lowerCAmelCase = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt ):
        if lines[index].startswith("1." ):
            __lowerCAmelCase = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc" , "https://huggingface.co/docs/transformers/model_doc" , )
        index += 1
    with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
        f.writelines(_UpperCamelCase )
def _lowerCamelCase ( ):
    """Read the current package version from the main __init__ and parse it.

    NOTE(review): the function takes no parameters yet reads `_UpperCamelCase`
    on the last two lines — obfuscation damage; this raises NameError as-is.
    """
    with open(REPLACE_FILES["init"] , "r" ) as f:
        __lowerCAmelCase = f.read()
    __lowerCAmelCase = REPLACE_PATTERNS["init"][0].search(_UpperCamelCase ).groups()[0]
    return packaging.version.parse(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase=False ):
    """Interactive pre-release step: pick the next version and pin it everywhere.

    Derives a default next version (base of a dev release, micro bump for a
    patch, otherwise minor bump), lets the operator override it, then updates
    all version pins and — for non-patch releases — cleans the README links.
    """
    __lowerCAmelCase = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        __lowerCAmelCase = default_version.base_version
    elif patch:
        __lowerCAmelCase = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        __lowerCAmelCase = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    __lowerCAmelCase = input(f"Which version are you releasing? [{default_version}]" )
    if len(_UpperCamelCase ) == 0:
        # Empty input: accept the suggested default.
        __lowerCAmelCase = default_version
    print(f"Updating version to {version}." )
    global_version_update(_UpperCamelCase , patch=_UpperCamelCase )
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()
def _lowerCamelCase ( ):
    """Interactive post-release step: move the repo to the next .dev0 version."""
    __lowerCAmelCase = get_version()
    # Default dev version: bump the minor and append .dev0.
    __lowerCAmelCase = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    __lowerCAmelCase = current_version.base_version
    # Check with the user we got that right.
    __lowerCAmelCase = input(f"Which version are we developing now? [{dev_version}]" )
    if len(_UpperCamelCase ) == 0:
        __lowerCAmelCase = dev_version
    print(f"Updating version to {version}." )
    global_version_update(_UpperCamelCase )
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # Release helper CLI: default = pre-release pinning, --post_release = dev bump.
    # NOTE(review): `parser`/`args` are read but the obfuscation bound them to `A`,
    # and the pre/post release functions above are both named `_lowerCamelCase`.
    A : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    A : Dict = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        # A patch release needs no post-release dev bump.
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A : Tuple = logging.get_logger(__name__)
A : int = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Configuration for a RoCBert-style model (model_type "roc_bert").

    Extends a BERT-like configuration with pronunciation- and shape-embedding
    options used by RoCBert.

    NOTE(review): obfuscation damage — every parameter is named ``__a`` and
    every assignment targets ``__lowerCAmelCase``, so the right-hand names
    (``vocab_size`` etc.) are unbound and nothing is stored on ``self``.  In
    the original these are ``self.vocab_size = vocab_size`` and so on;
    ``classifier_dropout`` has no corresponding parameter here at all.
    """

    # model_type identifier used by the Auto* config machinery.
    __UpperCAmelCase : Optional[Any] ="""roc_bert"""

    def __init__( self , __a=3_05_22 , __a=7_68 , __a=12 , __a=12 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=2 , __a=0.0_2 , __a=1e-1_2 , __a=True , __a=0 , __a="absolute" , __a=None , __a=True , __a=True , __a=7_68 , __a=9_10 , __a=5_12 , __a=2_48_58 , __a=True , **__a , ):
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = layer_norm_eps
        __lowerCAmelCase = use_cache
        __lowerCAmelCase = enable_pronunciation
        __lowerCAmelCase = enable_shape
        __lowerCAmelCase = pronunciation_embed_dim
        __lowerCAmelCase = pronunciation_vocab_size
        __lowerCAmelCase = shape_embed_dim
        __lowerCAmelCase = shape_vocab_size
        __lowerCAmelCase = concat_input
        __lowerCAmelCase = position_embedding_type
        __lowerCAmelCase = classifier_dropout
        # Delegate pad token handling and remaining kwargs to the base config.
        super().__init__(pad_token_id=__a , **__a )
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Tuple = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
A : Dict = logging.getLogger(__name__)
A : int = tf.data.AUTOTUNE
def _lowerCamelCase ( ):
    """Build and parse the CLI arguments for TPU masked-language-model training.

    NOTE(review): obfuscated — the parser and parsed namespace are assigned to
    `__lowerCAmelCase`, yet `parser` and `args` are the names read below.
    """
    __lowerCAmelCase = argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config" , type=_UpperCamelCase , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
    parser.add_argument(
        "--tokenizer" , type=_UpperCamelCase , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
    parser.add_argument(
        "--per_replica_batch_size" , type=_UpperCamelCase , default=8 , help="Batch size per TPU core." , )
    parser.add_argument(
        "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
    parser.add_argument(
        "--tpu_name" , type=_UpperCamelCase , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
    parser.add_argument(
        "--tpu_zone" , type=_UpperCamelCase , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
    parser.add_argument(
        "--gcp_project" , type=_UpperCamelCase , help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
    parser.add_argument(
        "--train_dataset" , type=_UpperCamelCase , help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--shuffle_buffer_size" , type=_UpperCamelCase , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
    parser.add_argument(
        "--eval_dataset" , type=_UpperCamelCase , help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--num_epochs" , type=_UpperCamelCase , default=1 , help="Number of epochs to train for." , )
    parser.add_argument(
        "--learning_rate" , type=_UpperCamelCase , default=1e-4 , help="Learning rate to use for training." , )
    parser.add_argument(
        "--weight_decay_rate" , type=_UpperCamelCase , default=1e-3 , help="Weight decay rate to use for training." , )
    parser.add_argument(
        "--max_length" , type=_UpperCamelCase , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
    parser.add_argument(
        "--mlm_probability" , type=_UpperCamelCase , default=0.15 , help="Fraction of tokens to mask during training." , )
    parser.add_argument("--output_dir" , type=_UpperCamelCase , required=_UpperCamelCase , help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id" , type=_UpperCamelCase , help="Model ID to upload to on the Hugging Face Hub." )
    __lowerCAmelCase = parser.parse_args()
    return args
def _lowerCamelCase ( _UpperCamelCase ):
    """Resolve, connect to, and initialize the TPU system described by `args`.

    Raises a RuntimeError with actionable guidance when the TPU cannot be
    resolved (missing --tpu_name / --tpu_zone / --gcp_project).
    """
    try:
        if args.tpu_name:
            # Explicit TPU resource (non-Colab node or TPU VM).
            __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            # Colab-style auto-detection.
            __lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(_UpperCamelCase )
    tf.tpu.experimental.initialize_tpu_system(_UpperCamelCase )
    return tpu
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
for file in file_list:
__lowerCAmelCase = file.split("/" )[-1]
__lowerCAmelCase = re.search(R"-\d+-(\d+)\.tfrecord" , _UpperCamelCase ).group(1 )
__lowerCAmelCase = int(_UpperCamelCase )
num_samples += sample_count
return num_samples
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
    """Build a batched tf.data pipeline from TFRecord shards.

    Steps: count samples, optionally shuffle the shard list, read the records,
    assert the cardinality, decode, optionally shuffle samples, batch, apply
    the masking function, and prefetch.

    NOTE(review): obfuscated — `dataset`, `shuffle`, `shuffle_buffer_size` and
    the global `args` are read but never bound by the collapsed assignments.
    """
    __lowerCAmelCase = count_samples(_UpperCamelCase )
    __lowerCAmelCase = tf.data.Dataset.from_tensor_slices(_UpperCamelCase )
    if shuffle:
        # Shuffle at the shard level first so parallel reads interleave shards.
        __lowerCAmelCase = dataset.shuffle(len(_UpperCamelCase ) )
    __lowerCAmelCase = tf.data.TFRecordDataset(_UpperCamelCase , num_parallel_reads=_UpperCamelCase )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    __lowerCAmelCase = dataset.apply(tf.data.experimental.assert_cardinality(_UpperCamelCase ) )
    __lowerCAmelCase = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase )
    if shuffle:
        assert shuffle_buffer_size is not None
        # Sample-level shuffle within a bounded buffer.
        __lowerCAmelCase = dataset.shuffle(args.shuffle_buffer_size )
    __lowerCAmelCase = dataset.batch(_UpperCamelCase , drop_remainder=_UpperCamelCase )
    __lowerCAmelCase = dataset.map(_UpperCamelCase , num_parallel_calls=_UpperCamelCase )
    __lowerCAmelCase = dataset.prefetch(_UpperCamelCase )
    return dataset
def _lowerCamelCase ( _UpperCamelCase ):
    """End-to-end TPU masked-LM training driven by the parsed CLI args.

    Sets up the distribution strategy, loads tokenizer/config, builds train and
    eval tf.data pipelines from TFRecord shards, creates + compiles the model
    inside the strategy scope, trains, and saves the result.

    NOTE(review): obfuscated — most names read below (`strategy`, `tokenizer`,
    `model`, `training_records`, ...) are never bound, and `args.bfloataa`
    looks like a mangled `args.bfloat16`; verify against the original script.
    """
    if not args.no_tpu:
        __lowerCAmelCase = initialize_tpu(_UpperCamelCase )
        __lowerCAmelCase = tf.distribute.TPUStrategy(_UpperCamelCase )
    else:
        # Debug path: single device, no TPU initialization.
        __lowerCAmelCase = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloataa:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
    __lowerCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer )
    __lowerCAmelCase = AutoConfig.from_pretrained(args.pretrained_model_config )
    # Resize the config's vocab to the tokenizer's actual vocab.
    __lowerCAmelCase = tokenizer.vocab_size
    __lowerCAmelCase = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}." )
    __lowerCAmelCase = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}." )
    __lowerCAmelCase = count_samples(_UpperCamelCase )
    __lowerCAmelCase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    __lowerCAmelCase = steps_per_epoch * args.num_epochs
    with strategy.scope():
        # Fresh weights from config only — no pretrained weights are copied.
        __lowerCAmelCase = TFAutoModelForMaskedLM.from_config(_UpperCamelCase )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        __lowerCAmelCase , __lowerCAmelCase = create_optimizer(
            num_train_steps=_UpperCamelCase , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=_UpperCamelCase , metrics=["accuracy"] )

    def decode_fn(_UpperCamelCase ):
        # Parse one serialized tf.Example into fixed-length id/mask tensors.
        __lowerCAmelCase = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(_UpperCamelCase , _UpperCamelCase )

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    __lowerCAmelCase = DataCollatorForLanguageModeling(
        tokenizer=_UpperCamelCase , mlm_probability=args.mlm_probability , mlm=_UpperCamelCase , return_tensors="tf" )

    def mask_with_collator(_UpperCamelCase ):
        # TF really needs an isin() function
        # Special-token mask: padding plus CLS/SEP must never be masked for MLM.
        __lowerCAmelCase = (
            ~tf.cast(batch["attention_mask"] , tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        __lowerCAmelCase , __lowerCAmelCase = data_collator.tf_mask_tokens(
            batch["input_ids"] , vocab_size=len(_UpperCamelCase ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=_UpperCamelCase , )
        return batch

    # Global batch size = per-replica batch size times the replica count.
    __lowerCAmelCase = args.per_replica_batch_size * strategy.num_replicas_in_sync
    __lowerCAmelCase = prepare_dataset(
        _UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , shuffle_buffer_size=args.shuffle_buffer_size , )
    __lowerCAmelCase = prepare_dataset(
        _UpperCamelCase , decode_fn=_UpperCamelCase , mask_fn=_UpperCamelCase , batch_size=_UpperCamelCase , shuffle=_UpperCamelCase , )
    __lowerCAmelCase = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=_UpperCamelCase ) )
    model.fit(
        _UpperCamelCase , validation_data=_UpperCamelCase , epochs=args.num_epochs , callbacks=_UpperCamelCase , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
    # NOTE(review): `parse_args`/`main` are not defined under those names above
    # (both functions are named `_lowerCamelCase`), and the parsed namespace is
    # bound to `A` while `args` is passed — NameError as written.
    A : List[Any] = parse_args()
    main(args)
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase = 6008_5147_5143 ):
'''simple docstring'''
try:
__lowerCAmelCase = int(_UpperCamelCase )
except (TypeError, ValueError):
raise TypeError("Parameter n must be int or castable to int." )
if n <= 0:
raise ValueError("Parameter n must be greater than or equal to one." )
__lowerCAmelCase = 2
__lowerCAmelCase = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
__lowerCAmelCase = i
while n % i == 0:
__lowerCAmelCase = n // i
i += 1
return int(_UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file (the factoriser above
    # is named `_lowerCamelCase`), so running this as a script raises NameError.
    print(f'''{solution() = }''')
| 57
| 1
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
A : Optional[int] = "src/transformers"
# Matches is_xxx_available()
A : Optional[Any] = re.compile(R"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
A : int = re.compile(R"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
A : Tuple = re.compile(R"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
A : Union[str, Any] = re.compile(R"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
A : Optional[int] = re.compile(R"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
A : Union[str, Any] = re.compile(R"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
A : Optional[Any] = re.compile(R"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
A : Optional[int] = re.compile(R"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
A : Union[str, Any] = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
A : Optional[Any] = re.compile(R"^\s*try:")
# Catches a line with else:
A : str = re.compile(R"^\s*else:")
def _lowerCamelCase ( _UpperCamelCase ):
    """Return the normalized backend key for an `if not is_xxx_available()` line.

    Multiple backends on one line are sorted and joined with "_and_"; returns
    None when the line is not a backend guard.

    NOTE(review): obfuscated — `_re_test_backend`, `_re_backend` and `backends`
    are read but never bound here (the module regexes above are all named `A`).
    """
    if _re_test_backend.search(_UpperCamelCase ) is None:
        return None
    __lowerCAmelCase = [b[0] for b in _re_backend.findall(_UpperCamelCase )]
    backends.sort()
    return "_and_".join(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase ):
    """Parse an __init__.py and collect its declared objects from both halves.

    Returns a pair of dicts mapping backend name ("none" for backend-free) to
    the list of objects declared in `_import_structure` and in the
    TYPE_CHECKING section respectively, or None for a traditional init without
    an `_import_structure`.

    NOTE(review): obfuscated — names like `lines`, `line_index`, `objects`,
    `backend` and the `_re_*` module regexes are read without being bound by
    the collapsed assignments; structure reconstructed from the scanner logic.
    """
    with open(_UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
        __lowerCAmelCase = f.readlines()
    __lowerCAmelCase = 0
    # Skip ahead to the start of the _import_structure declaration.
    while line_index < len(_UpperCamelCase ) and not lines[line_index].startswith("_import_structure = {" ):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(_UpperCamelCase ):
        return None
    # First grab the objects without a specific backend in _import_structure
    __lowerCAmelCase = []
    while not lines[line_index].startswith("if TYPE_CHECKING" ) and find_backend(lines[line_index] ) is None:
        __lowerCAmelCase = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(_UpperCamelCase ):
            __lowerCAmelCase = _re_one_line_import_struct.search(_UpperCamelCase ).groups()[0]
            __lowerCAmelCase = re.findall(R"\[([^\]]+)\]" , _UpperCamelCase )
            for imp in imports:
                # Strip the surrounding quotes from each object name.
                objects.extend([obj[1:-1] for obj in imp.split(", " )] )
            line_index += 1
            continue
        __lowerCAmelCase = _re_import_struct_key_value.search(_UpperCamelCase )
        if single_line_import_search is not None:
            __lowerCAmelCase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", " ) if len(_UpperCamelCase ) > 0]
            objects.extend(_UpperCamelCase )
        elif line.startswith(" " * 8 + "\"" ):
            # Bare quoted object on its own 8-space-indented line.
            objects.append(line[9:-3] )
        line_index += 1
    __lowerCAmelCase = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING" ):
        # If the line is an if not is_backend_available, we grab all objects associated.
        __lowerCAmelCase = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __lowerCAmelCase = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            __lowerCAmelCase = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 4 ):
                __lowerCAmelCase = lines[line_index]
                if _re_import_struct_add_one.search(_UpperCamelCase ) is not None:
                    objects.append(_re_import_struct_add_one.search(_UpperCamelCase ).groups()[0] )
                elif _re_import_struct_add_many.search(_UpperCamelCase ) is not None:
                    __lowerCAmelCase = _re_import_struct_add_many.search(_UpperCamelCase ).groups()[0].split(", " )
                    __lowerCAmelCase = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
                    objects.extend(_UpperCamelCase )
                elif _re_between_brackets.search(_UpperCamelCase ) is not None:
                    __lowerCAmelCase = _re_between_brackets.search(_UpperCamelCase ).groups()[0].split(", " )
                    __lowerCAmelCase = [obj[1:-1] for obj in imports if len(_UpperCamelCase ) > 0]
                    objects.extend(_UpperCamelCase )
                elif _re_quote_object.search(_UpperCamelCase ) is not None:
                    objects.append(_re_quote_object.search(_UpperCamelCase ).groups()[0] )
                elif line.startswith(" " * 8 + "\"" ):
                    objects.append(line[9:-3] )
                elif line.startswith(" " * 12 + "\"" ):
                    objects.append(line[13:-3] )
                line_index += 1
            __lowerCAmelCase = objects
        else:
            line_index += 1
    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    __lowerCAmelCase = []
    while (
        line_index < len(_UpperCamelCase )
        and find_backend(lines[line_index] ) is None
        and not lines[line_index].startswith("else" )
    ):
        __lowerCAmelCase = lines[line_index]
        __lowerCAmelCase = _re_import.search(_UpperCamelCase )
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", " ) )
        elif line.startswith(" " * 8 ):
            objects.append(line[8:-2] )
        line_index += 1
    __lowerCAmelCase = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(_UpperCamelCase ):
        # If the line is an if is_backend_available, we grab all objects associated.
        __lowerCAmelCase = find_backend(lines[line_index] )
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1] ) is None:
            __lowerCAmelCase = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index] ) is None:
                line_index += 1
            line_index += 1
            __lowerCAmelCase = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index] ) <= 1 or lines[line_index].startswith(" " * 8 ):
                __lowerCAmelCase = lines[line_index]
                __lowerCAmelCase = _re_import.search(_UpperCamelCase )
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", " ) )
                elif line.startswith(" " * 12 ):
                    objects.append(line[12:-2] )
                line_index += 1
            __lowerCAmelCase = objects
        else:
            line_index += 1
    return import_dict_objects, type_hint_objects
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """Compare the `_import_structure` and TYPE_CHECKING halves of an init.

    Returns a list of human-readable error strings: mismatched backend sets,
    duplicated declarations, and objects present on only one side.

    NOTE(review): obfuscated — `import_dict_objects`, `type_hint_objects` and
    `errors` are read without being bound (presumably the two parameters and
    the collapsed list assignment respectively).
    """
    def find_duplicates(_UpperCamelCase ):
        # Names declared more than once within a single backend bucket.
        return [k for k, v in collections.Counter(_UpperCamelCase ).items() if v > 1]

    if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
        return ["Both sides of the init do not have the same backends!"]
    __lowerCAmelCase = []
    for key in import_dict_objects.keys():
        __lowerCAmelCase = find_duplicates(import_dict_objects[key] )
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}" )
        __lowerCAmelCase = find_duplicates(type_hint_objects[key] )
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}" )
        # Compare as sets so ordering differences are not reported as errors.
        if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
            __lowerCAmelCase = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:" )
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure." )
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT." )
    return errors
def _lowerCamelCase ( ):
    """Walk the package tree, parse every __init__.py, and raise on mismatches.

    NOTE(review): obfuscated — the walk root `_UpperCamelCase`, plus `objects`,
    `errors`, `failures` and `fname` are read without ever being bound.
    """
    __lowerCAmelCase = []
    for root, _, files in os.walk(_UpperCamelCase ):
        if "__init__.py" in files:
            __lowerCAmelCase = os.path.join(_UpperCamelCase , "__init__.py" )
            __lowerCAmelCase = parse_init(_UpperCamelCase )
            if objects is not None:
                __lowerCAmelCase = analyze_results(*_UpperCamelCase )
                if len(_UpperCamelCase ) > 0:
                    # Prefix the first error with the offending file for context.
                    __lowerCAmelCase = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(_UpperCamelCase ) )
    if len(_UpperCamelCase ) > 0:
        raise ValueError("\n\n".join(_UpperCamelCase ) )
def _lowerCamelCase ( ):
    '''Return the dotted names of all transformers submodules found on disk.

    NOTE(review): `_UpperCamelCase` is a renaming leftover — it presumably
    held the path to the transformers package; restore before use.
    '''
    __lowerCAmelCase = []  # accumulated dotted submodule names
    for path, directories, files in os.walk(_UpperCamelCase ):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_" ):
                directories.remove(_UpperCamelCase )
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(_UpperCamelCase ) / folder).glob("*.py" ) ) ) == 0:
                continue
            __lowerCAmelCase = str((Path(_UpperCamelCase ) / folder).relative_to(_UpperCamelCase ) )
            __lowerCAmelCase = short_path.replace(os.path.sep , "." )
            submodules.append(_UpperCamelCase )
        for fname in files:
            if fname == "__init__.py":
                continue
            __lowerCAmelCase = str((Path(_UpperCamelCase ) / fname).relative_to(_UpperCamelCase ) )
            __lowerCAmelCase = short_path.replace(".py" , "" ).replace(os.path.sep , "." )
            # Only keep top-level modules; deeper files are covered by their package.
            if len(submodule.split("." ) ) == 1:
                submodules.append(_UpperCamelCase )
    return submodules
# Submodules deliberately not registered in the main init (conversion/utility
# code).  NOTE(review): this constant was presumably named IGNORE_SUBMODULES
# before automated renaming — the check below still references that name.
A : Optional[Any] = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def _lowerCamelCase ( ):
    '''Verify every submodule found on disk appears in the main init's
    `_import_structure`; raise ValueError listing any missing ones.

    NOTE(review): `_UpperCamelCase` and `IGNORE_SUBMODULES` are renaming
    leftovers (package path / ignore-list constant); restore before running.
    '''
    from transformers.utils import direct_transformers_import
    # Import transformers from source so the full _import_structure is visible.
    __lowerCAmelCase = direct_transformers_import(_UpperCamelCase )
    __lowerCAmelCase = set(transformers._import_structure.keys() )
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(_UpperCamelCase , "__init__.py" ) , "r" ) as f:
        __lowerCAmelCase = f.read()
    import_structure_keys.update(set(re.findall(R"import_structure\[\"([^\"]*)\"\]" , _UpperCamelCase ) ) )
    __lowerCAmelCase = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(_UpperCamelCase ) > 0:
        __lowerCAmelCase = "\n".join(f"- {module}" for module in module_not_registered )
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value." )
if __name__ == "__main__":
    # Run both consistency checks; each raises ValueError on failure.
    check_all_inits()
    check_submodules()
| 57
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Reusable test harness for configuration classes (a ConfigTester).

    NOTE(review): `__init__` repeats the parameter name `__a` — an artifact
    of automated renaming that is a SyntaxError as written; the original
    parameters were (parent, config_class, has_text_modality,
    common_properties) plus **kwargs, matching the names the body assigns.
    '''
    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        # Store the harness (a unittest.TestCase), the config class under
        # test, the text-modality flag, constructor kwargs and the list of
        # common properties to validate.
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties
    def snake_case ( self ):
        # Check the config exposes the common properties as getters, setters
        # and constructor keyword arguments.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def snake_case ( self ):
        # Round-trip the config through to_json_string and compare fields.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )
    def snake_case ( self ):
        # Round-trip the config through a JSON file on disk.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # Round-trip via save_pretrained / from_pretrained.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # Round-trip via save_pretrained / from_pretrained with a subfolder.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # num_labels must drive the sizes of the id/label mappings, both at
        # construction time and when reassigned afterwards.
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )
    def snake_case ( self ):
        # Composite configs require sub-configs and cannot be built bare.
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )
    def snake_case ( self ):
        # Every kwarg in config_common_kwargs must land on the config object.
        # NOTE(review): `__a` below is a renaming artifact (presumably
        # `config_common_kwargs`); confirm before running.
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
    def snake_case ( self ):
        # Entry point: run the whole battery of config checks.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 1
|
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Run an untrusted program in a subprocess with a hard timeout and
    return a dict with task id, pass/fail flag, raw verdict and completion id.

    NOTE(review): the four parameters share one name — a renaming artifact
    that is a SyntaxError as written; originally (check_program, timeout,
    task_id, completion_id), and the Process target was the sandboxed
    executor (`unsafe_execute`).
    '''
    __lowerCAmelCase = multiprocessing.Manager()
    __lowerCAmelCase = manager.list()  # shared list the child appends its verdict to
    __lowerCAmelCase = multiprocessing.Process(target=_UpperCamelCase , args=(check_program, result, timeout) )
    p.start()
    p.join(timeout=timeout + 1 )
    # The child may hang despite the in-process alarm; kill it as a backstop.
    if p.is_alive():
        p.kill()
    if not result:
        result.append("timed out" )
    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Execute an untrusted program inside a temp dir, with I/O swallowed and
    destructive APIs disabled, recording "passed"/"timed out"/"failed: ..."
    into the shared result list.

    NOTE(review): the three parameters share one name (renaming artifact,
    SyntaxError as written); originally (check_program, result, timeout).
    '''
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil
        __lowerCAmelCase = shutil.rmtree
        __lowerCAmelCase = os.rmdir
        __lowerCAmelCase = os.chdir
        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()
        # Run program.
        try:
            __lowerCAmelCase = {}
            with swallow_io():
                with time_limit(_UpperCamelCase ):
                    exec(_UpperCamelCase , _UpperCamelCase )
            result.append("passed" )
        except TimeoutException:
            result.append("timed out" )
        except BaseException as e:
            result.append(f"failed: {e}" )
        # Needed for cleaning up.
        __lowerCAmelCase = rmtree
        __lowerCAmelCase = rmdir
        __lowerCAmelCase = chdir
@contextlib.contextmanager
def _lowerCamelCase ( _UpperCamelCase ):
    '''Context manager that raises TimeoutException after the given number
    of seconds via SIGALRM (Unix only); the timer is always disarmed on exit.
    '''
    # NOTE(review): the handler's parameters share one name (renaming
    # artifact, SyntaxError as written); originally (signum, frame).
    def signal_handler(_UpperCamelCase , _UpperCamelCase ):
        raise TimeoutException("Timed out!" )
    signal.setitimer(signal.ITIMER_REAL , _UpperCamelCase )
    signal.signal(signal.SIGALRM , _UpperCamelCase )
    try:
        yield
    finally:
        # Always disarm the timer, even if the body raised.
        signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def _lowerCamelCase ( ):
    '''Silence stdout/stderr and make stdin unreadable for the duration by
    routing all three streams through a write-only StringIO.'''
    __lowerCAmelCase = WriteOnlyStringIO()
    with contextlib.redirect_stdout(_UpperCamelCase ):
        with contextlib.redirect_stderr(_UpperCamelCase ):
            with redirect_stdin(_UpperCamelCase ):
                yield
@contextlib.contextmanager
def _lowerCamelCase ( ):
    '''Create a temporary directory, chdir into it for the duration of the
    block, and remove it (and restore the cwd) afterwards.'''
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(_UpperCamelCase ):
            yield dirname
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Raised by the time_limit context manager when the alarm fires.'''
    pass
class _UpperCamelCase ( io.StringIO ):
    '''StringIO that can be written to but never read — stands in for the
    standard streams so sandboxed code cannot read captured output.'''
    def snake_case ( self , *__a , **__a ):
        raise OSError
    def snake_case ( self , *__a , **__a ):
        raise OSError
    def snake_case ( self , *__a , **__a ):
        raise OSError
    def snake_case ( self , *__a , **__a ):
        # Tell the io machinery this stream is not readable.
        return False
class _UpperCamelCase ( contextlib._RedirectStream ): # type: ignore
    '''Context manager redirecting sys.stdin, mirroring redirect_stdout.'''
    __UpperCAmelCase : Dict ="""stdin"""
@contextlib.contextmanager
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if root == ".":
yield
return
__lowerCAmelCase = os.getcwd()
os.chdir(_UpperCamelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase=None ):
    '''Disable destructive functionality before running untrusted code,
    optionally capping process memory with resource limits.

    NOTE(review): the runs of `__lowerCAmelCase = None` below are renaming
    artifacts — each originally disabled a specific attribute (e.g.
    ``builtins.exit = None``, ``os.kill = None``, ``shutil.rmtree = None``,
    ``subprocess.Popen = None``).  As written they only rebind a local and
    guard nothing; likewise `maximum_memory_bytes` was the original
    parameter name.  Restore the targets before relying on this sandbox.
    '''
    if maximum_memory_bytes is not None:
        import resource
        resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
        resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
        # RLIMIT_STACK cannot be adjusted this way on macOS (Darwin).
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
    faulthandler.disable()
    import builtins
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    import os
    __lowerCAmelCase = "1"
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    import shutil
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    import subprocess
    __lowerCAmelCase = None # type: ignore
    __lowerCAmelCase = None
    import sys
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
    __lowerCAmelCase = None
| 57
|
"""simple docstring"""
# Pinned dependency table: maps a distribution name to its pip requirement
# specifier (typically consumed by setup.py / dependency tooling).
A : int = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 1
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures: the small Lena test image and its grayscale conversion.
# NOTE(review): these constants were originally named `img` and `gray_img` —
# the second line (and the tests below) still reference `img`; restore.
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
    '''convert_to_negative must produce at least one nonzero pixel.
    NOTE(review): `_UpperCamelCase` stood for the module-level test image
    (originally `img`) before automated renaming.'''
    __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()
def _lowerCamelCase ( ):
    '''change_contrast must return a 100x100 RGB PIL image (checked via repr).'''
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
    '''A 9x9 Gaussian kernel (sigma=1.4) must contain only nonzero entries.'''
    __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def _lowerCamelCase ( ):
    '''Canny edge detection on the grayscale Lena image must find edges.'''
    __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __lowerCAmelCase = canny.canny(_UpperCamelCase )
    # assert canny array for at least one True
    assert canny_array.any()
def _lowerCamelCase ( ):
    '''Gaussian filter output must be fully populated (no zero entries).'''
    assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
    '''Convolving with a Laplacian-style kernel must yield a nonzero response.'''
    __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
    assert res.any()
def _lowerCamelCase ( ):
    '''Median filtering (window 3) must yield a nonzero result.'''
    assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
    '''Sobel filtering must produce nonzero gradient and direction maps.'''
    __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
    assert grad.any() and theta.any()
def _lowerCamelCase ( ):
    '''Sepia conversion (factor 20) must populate every pixel.'''
    __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
    assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    '''Burkes dithering (threshold 120) must produce a nonzero output image.'''
    __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''Nearest-neighbour resize to 400x200 must produce a nonzero output.'''
    __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def _lowerCamelCase ( ):
    '''Local binary pattern: neighbour lookup works and the LBP map is nonzero.'''
    __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    __lowerCAmelCase = imread(_UpperCamelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = image[x_coordinate][y_coordinate]
    __lowerCAmelCase = lbp.get_neighbors_pixel(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert lbp_image.any()
| 57
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Byte-level (ByT5-style) tokenizer: every UTF-8 byte is a token, plus a
    small special-token vocabulary and optional sentinel ("extra id") tokens.

    NOTE(review): several method signatures below repeat the parameter name
    `__a` — a renaming artifact that is a SyntaxError as written; e.g.
    `__init__` was originally (eos_token, unk_token, pad_token, extra_ids,
    additional_special_tokens), matching the names the bodies reference.
    '''
    __UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
    def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        # Wrap plain-string special tokens as AddedToken instances.
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        super().__init__(
            eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = 2**8 # utf is 8 bits
        # define special tokens dict
        __lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCAmelCase = len(self.special_tokens_encoder )
        __lowerCAmelCase = len(__a )
        for i, token in enumerate(__a ):
            __lowerCAmelCase = self.vocab_size + i - n
        # Reverse mapping: id -> special token string.
        __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def snake_case ( self ):
        # Total vocab: 256 byte tokens + special tokens + sentinel ids.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def snake_case ( self , __a , __a = None , __a = False ):
        # Mask marking special (1) vs. sequence (0) positions; an EOS follows
        # each sequence.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a )) + [1]
        return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
    def snake_case ( self , __a ):
        # Append EOS unless the sequence already ends with it (warn instead).
        if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def snake_case ( self , __a , __a = None ):
        # Token-type ids are all zeros for this tokenizer.
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def snake_case ( self , __a , __a = None ):
        # Concatenate one or two sequences, each terminated by EOS.
        __lowerCAmelCase = self._add_eos_if_not_present(__a )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(__a )
            return token_ids_a + token_ids_a
    def snake_case ( self , __a ):
        # Tokenize into the string's individual UTF-8 bytes.
        __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
        return tokens
    def snake_case ( self , __a ):
        # Byte tokens map to ord(token) shifted past the special-token ids.
        if token in self.special_tokens_encoder:
            __lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(__a ) != 1:
            __lowerCAmelCase = self.unk_token_id
        else:
            __lowerCAmelCase = ord(__a ) + self._num_special_tokens
        return token_id
    def snake_case ( self , __a ):
        # Inverse of the token -> id conversion above.
        if index in self.special_tokens_decoder:
            __lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            __lowerCAmelCase = chr(index - self._num_special_tokens )
        return token
    def snake_case ( self , __a ):
        # Reassemble byte tokens into a UTF-8 string; special tokens keep
        # their literal text and undecodable byte sequences are dropped.
        __lowerCAmelCase = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            else:
                __lowerCAmelCase = bytes([ord(__a )] )
            bstring += tok_string
        __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
        return string
    def snake_case ( self , __a , __a = None ):
        # A byte tokenizer has no vocabulary file to save.
        return ()
| 57
| 1
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if n == 1 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
return 0
elif n == 2:
return 1
else:
__lowerCAmelCase = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
__lowerCAmelCase = 2
while digits < n:
index += 1
__lowerCAmelCase = len(str(fibonacci(_UpperCamelCase ) ) )
return index
def _lowerCamelCase ( _UpperCamelCase = 1000 ):
    '''Project Euler 25: index of the first Fibonacci number with 1000 digits.
    NOTE(review): `fibonacci_digits_index` was this module's sibling helper
    before automated renaming collapsed both function names.'''
    return fibonacci_digits_index(_UpperCamelCase )
if __name__ == "__main__":
    # input() already returns a str; the extra str() wrapper was redundant.
    print(solution(int(input().strip())))
| 57
|
"""simple docstring"""
import numpy
# List of input, output pairs
A : Any = (
    ((5, 2, 3), 1_5),
    ((6, 5, 9), 2_5),
    ((1_1, 1_2, 1_3), 4_1),
    ((1, 1, 1), 8),
    ((1_1, 1_2, 1_3), 4_1),
)
# Held-out test pairs.
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
# Model parameters (bias first), training-example count, and step size.
# NOTE(review): automated renaming collapsed the original distinct names
# (train_data, test_data, parameter_vector, m, LEARNING_RATE) into `A`;
# the functions below still reference those original names — restore them.
A : Union[str, Any] = [2, 4, 1, 5]
A : int = len(train_data)
A : Dict = 0.009
def _lowerCamelCase ( example_no , data_set="train" ):
    """Signed error (hypothesis - actual) for one example of the given split.

    Fix: the obfuscated signature repeated one parameter name (a SyntaxError);
    the original names (example_no, data_set) referenced by the intended body
    are restored.
    """
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
def _lowerCamelCase ( data_input_tuple ):
    """Hypothesis value: parameter_vector[0] (the bias) plus the dot product
    of the features with parameter_vector[1:].

    Fix: the body indexed `data_input_tuple`, which automated renaming had
    left unbound; the original parameter name is restored.
    NOTE(review): iterating only ``len(data_input_tuple) - 1`` features
    mirrors the original — confirm the last feature is intentionally unused.
    """
    hyp_val = 0
    for i in range(len(data_input_tuple) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    # Bias term added last, preserving the original summation order.
    hyp_val += parameter_vector[0]
    return hyp_val
def _lowerCamelCase ( example_no , data_set ):
    """Actual output value for the given example of the chosen split, or
    None for an unknown split.

    Fix: the obfuscated signature repeated one parameter name (a SyntaxError);
    the names the body uses (example_no, data_set) are restored.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def _lowerCamelCase ( example_no , data_set ):
    """Hypothesis value for the given example of the chosen split, or None
    for an unknown split.

    Fix: the obfuscated signature repeated one parameter name (a SyntaxError);
    the names the body uses (example_no, data_set) are restored.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def _lowerCamelCase ( index , end=None ):
    """Sum of error terms over the first ``end`` training examples, each
    multiplied by the index-th feature unless ``index == -1`` (the bias).

    Fixes: the obfuscated signature repeated one parameter name (a
    SyntaxError) and defaulted to the module global ``m`` evaluated at
    definition time; the original names are restored and the default is made
    late-binding (``end=None`` -> use ``m`` at call time), which is
    backward-compatible for all existing positional/omitted calls.
    """
    if end is None:
        end = m  # total number of training examples
    summation_value = 0
    for i in range(end):
        if index == -1:
            # index -1 denotes the bias term: no feature multiplier.
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def _lowerCamelCase ( index ):
    """Cost derivative w.r.t. parameter ``index``: the error summation over
    all m training examples divided by m (``index == -1`` means the bias).

    Fix: the obfuscated body passed its single parameter twice and read an
    unbound local; the intended call ``summation_of_cost_derivative(index, m)``
    is restored.
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def _lowerCamelCase ( ):
    '''Batch gradient descent on parameter_vector until numpy.allclose
    detects convergence within the absolute/relative tolerances below.

    NOTE(review): automated renaming replaced the indexed update (originally
    `temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative`)
    and the names LEARNING_RATE / get_cost_derivative / temp_parameter_vector
    with unbound placeholders; restore them before executing.
    '''
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    __lowerCAmelCase = 0.00_00_02  # absolute tolerance
    __lowerCAmelCase = 0  # relative tolerance
    __lowerCAmelCase = 0  # iteration counter
    while True:
        j += 1
        __lowerCAmelCase = [0, 0, 0, 0]  # next parameter vector
        for i in range(0 , len(_UpperCamelCase ) ):
            __lowerCAmelCase = get_cost_derivative(i - 1 )
            __lowerCAmelCase = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        # Converged once successive parameter vectors are numerically close.
        if numpy.allclose(
            _UpperCamelCase , _UpperCamelCase , atol=_UpperCamelCase , rtol=_UpperCamelCase , ):
            break
        __lowerCAmelCase = temp_parameter_vector
    print(("Number of iterations:", j) )
def _lowerCamelCase ( ):
    '''Print actual vs. predicted outputs for every test example.
    NOTE(review): the `_UpperCamelCase` placeholders stood for `test_data`
    and the loop index `i` before automated renaming.'''
    for i in range(len(_UpperCamelCase ) ):
        print(("Actual output value:", output(_UpperCamelCase , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(_UpperCamelCase , "test" )) )
if __name__ == "__main__":
    # Fit the parameters, then report predictions on the held-out pairs.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 57
| 1
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
    # NOTE(review): `input_list` / `sorted_list` below were the original
    # names of the two `A` constants; restore them for this path to run.
    print("Enter list to be sorted")
    A : Union[str, Any] = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    A : str = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    '''Tests for ChineseCLIPProcessor: save/load round-trips and agreement
    between the processor and its underlying tokenizer / image processor.
    NOTE(review): the bare `__a` references in the bodies are renaming
    artifacts standing in for the locals assigned just above them.'''
    def snake_case ( self ):
        # Build a throwaway vocab file and image-processor config on disk.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )
    def snake_case ( self , **__a ):
        # Slow (Python) tokenizer fixture.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Fast (Rust) tokenizer fixture.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Image-processor fixture.
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self ):
        # Remove the on-disk fixtures after each test.
        shutil.rmtree(self.tmpdirname )
    def snake_case ( self ):
        # A single random RGB PIL image fixture.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        # Saving with either tokenizer flavour and reloading must preserve
        # both the vocab and the image-processor configuration.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )
    def snake_case ( self ):
        # from_pretrained must honour extra kwargs for both sub-components.
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )
    def snake_case ( self ):
        # The processor's image path must match the bare image processor.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def snake_case ( self ):
        # The processor's text path must match the bare tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def snake_case ( self ):
        # Joint text+image call returns all expected keys; empty call raises.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()
    def snake_case ( self ):
        # batch_decode must delegate to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )
    def snake_case ( self ):
        # Returned keys must match the processor's declared model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 1
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """Integration test: DiT fine-tuned on RVL-CDIP classifies a sample document.

    Fix: the obfuscated original collapsed all locals onto `__lowerCAmelCase`
    while the assertions read `model`, `logits`, etc. -> NameError. The device
    argument was likewise lost; `torch_device` (imported above) is restored.
    """

    @slow
    def snake_case ( self ):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 57
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( _UpperCamelCase = 4 ):
'''simple docstring'''
__lowerCAmelCase = abs(_UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(_UpperCamelCase )] for y in range(_UpperCamelCase )]
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate a matrix 90 degrees counterclockwise."""
    flipped = transpose(_UpperCamelCase)
    return reverse_row(flipped)
    # Equivalent alternative: transpose(reverse_column(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate a matrix 180 degrees."""
    flipped = reverse_column(_UpperCamelCase)
    return reverse_row(flipped)
    # Equivalent alternative: reverse_column(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
    """Rotate a matrix 270 degrees counterclockwise (i.e. 90 clockwise)."""
    flipped = transpose(_UpperCamelCase)
    return reverse_column(flipped)
    # Equivalent alternative: transpose(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [list(_UpperCamelCase ) for x in zip(*_UpperCamelCase )]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = matrix[::-1]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [x[::-1] for x in matrix]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for i in matrix:
print(*_UpperCamelCase )
if __name__ == "__main__":
    # Demo driver: builds a fresh matrix before each rotation so the
    # reversals above do not leak between examples.
    # NOTE(review): the helpers above are all named `_lowerCamelCase` and the
    # results below are bound to `A`, yet this block calls
    # `make_matrix`/`print_matrix`/`rotate_aa`/`rotate_aaa` and reads `matrix`
    # -- none of those names exist in this file as written, so the script
    # cannot run. Looks like mechanical renaming broke it; confirm against the
    # original source.
    A : Dict = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    A : List[str] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    A : str = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 57
| 1
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase ( pl.LightningModule ):
    """Minimal LightningModule wrapper used only to load the QA checkpoint.

    Fix: the obfuscated original assigned to the throwaway local
    `__lowerCAmelCase` instead of the `self.model` / `self.num_labels`
    attributes that `nn.Linear(...)` below (and the conversion function that
    reads `lightning_model.model` / `lightning_model.qa_outputs`) require.
    """

    def __init__( self , __a ):
        super().__init__()
        self.model = __a  # backbone LongformerModel
        self.num_labels = 2  # start/end logits for extractive QA
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def snake_case ( self ):
        # Forward pass intentionally not implemented: this wrapper only exists
        # so the Lightning checkpoint's state dict can be loaded.
        pass
def _lowerCamelCase ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    """Convert a PyTorch-Lightning Longformer-QA checkpoint to a HF checkpoint.

    Fixes vs the obfuscated original: the three parameters were all named
    `_UpperCamelCase` (duplicate arguments -> SyntaxError); they are restored
    to the names the body references (`pytorch_dump_folder_path` in the final
    message, matching the argparse flags below), and the Lightning wrapper is
    instantiated by its actual name in this file (`_UpperCamelCase`).
    """
    # load the base Longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = _UpperCamelCase(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # CLI entry point for the Lightning -> HF Longformer-QA conversion.
    # NOTE(review): the parser is bound to `A` but configured/consumed via
    # `parser`/`args`, and the conversion function above is `_lowerCamelCase`,
    # not `convert_longformer_qa_checkpoint_to_pytorch` -- this block cannot
    # run as written. Mechanical renaming appears to have broken it; confirm
    # against the original script.
    A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    A : Optional[int] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
    """Pipeline tests for `text2text-generation` (encoder-decoder models).

    Reconstructed from the obfuscated original: duplicate `__a` parameters
    (a SyntaxError) and locals collapsed onto `__lowerCAmelCase` are restored
    to the names the bodies reference (`generator`, `outputs`, ...); the two
    class attributes and the test methods are given the names the shared
    pipeline-test harness and unittest discovery require.
    """

    # Mappings consumed by the shared pipeline-test harness.
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        # Non-string input is rejected.
        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        # Batched generation needs a pad token on this tiny T5 checkpoint.
        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 57
| 1
|
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
def snake_case ( self , __a ):
return self.node_position[vertex]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = pos
def snake_case ( self , __a , __a , __a , __a ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCAmelCase = 2 * start + 1
else:
__lowerCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCAmelCase , __lowerCAmelCase = heap[smallest_child], positions[smallest_child]
__lowerCAmelCase , __lowerCAmelCase = (
heap[start],
positions[start],
)
__lowerCAmelCase , __lowerCAmelCase = temp, tempa
__lowerCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __a )
self.top_to_bottom(__a , __a , __a , __a )
def snake_case ( self , __a , __a , __a , __a ):
__lowerCAmelCase = position[index]
while index != 0:
__lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCAmelCase = heap[parent]
__lowerCAmelCase = position[parent]
self.set_position(position[parent] , __a )
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , __a )
break
__lowerCAmelCase = parent
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , 0 )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = len(__a ) // 2 - 1
for i in range(__a , -1 , -1 ):
self.top_to_bottom(__a , __a , len(__a ) , __a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = positions[0]
__lowerCAmelCase = sys.maxsize
self.top_to_bottom(__a , 0 , len(__a ) , __a )
return temp
def _lowerCamelCase ( adjacency_list ):
    """Prim's minimum-spanning-tree algorithm.

    `adjacency_list` maps vertex -> list of [neighbor, edge_weight]; returns
    the MST as a list of (parent_vertex, vertex) edges, rooted at vertex 0.

    Fixes vs the obfuscated original: locals were collapsed onto
    `__lowerCAmelCase` while the body read `visited`, `distance_tv`,
    `nbr_tv`, ... (NameError); the parameter is restored to `adjacency_list`
    (the name the body references) and the heap is instantiated via
    `_UpperCamelCase`, the class actually defined in this file.
    """
    heap = _UpperCamelCase()  # the min-heap class defined above
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Reads `edges_number` undirected weighted edges ("u v w" per line) and
    # prints the minimum spanning tree.
    # NOTE(review): the inputs are bound to `A` but consumed via
    # `edges_number`/`adjacency_list`/`edge`, and the function above is
    # `_lowerCamelCase`, not `prisms_algorithm` -- this block cannot run as
    # written; confirm against the original source.
    A : Optional[Any] = int(input("Enter number of edges: ").strip())
    A : Dict = defaultdict(list)
    for _ in range(edges_number):
        A : str = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase ( pl.LightningModule ):
    """Minimal LightningModule wrapper used only to load the QA checkpoint.

    Fix: the obfuscated original assigned to the throwaway local
    `__lowerCAmelCase` instead of the `self.model` / `self.num_labels`
    attributes that `nn.Linear(...)` below (and the conversion function that
    reads `lightning_model.model` / `lightning_model.qa_outputs`) require.
    """

    def __init__( self , __a ):
        super().__init__()
        self.model = __a  # backbone LongformerModel
        self.num_labels = 2  # start/end logits for extractive QA
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def snake_case ( self ):
        # Forward pass intentionally not implemented: this wrapper only exists
        # so the Lightning checkpoint's state dict can be loaded.
        pass
def _lowerCamelCase ( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    """Convert a PyTorch-Lightning Longformer-QA checkpoint to a HF checkpoint.

    Fixes vs the obfuscated original: the three parameters were all named
    `_UpperCamelCase` (duplicate arguments -> SyntaxError); they are restored
    to the names the body references (`pytorch_dump_folder_path` in the final
    message, matching the argparse flags below), and the Lightning wrapper is
    instantiated by its actual name in this file (`_UpperCamelCase`).
    """
    # load the base Longformer model from the model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = _UpperCamelCase(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # CLI entry point for the Lightning -> HF Longformer-QA conversion.
    # NOTE(review): the parser is bound to `A` but configured/consumed via
    # `parser`/`args`, and the conversion function above is `_lowerCamelCase`,
    # not `convert_longformer_qa_checkpoint_to_pytorch` -- this block cannot
    # run as written. Mechanical renaming appears to have broken it; confirm
    # against the original script.
    A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    A : Optional[int] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
| 1
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning whenever this legacy module path is imported;
# the re-exported pipeline itself lives in `..controlnet` (see import above).
deprecate(
    "stable diffusion controlnet",
    "0.22.0",
    "Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
    standard_warn=False,
    stacklevel=3,
)
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
    # Interactive demo: read whitespace-separated integers and print them sorted.
    # NOTE(review): the sort above is defined as `_lowerCamelCase` and the
    # parsed list is bound to `A`, yet this block calls
    # `odd_even_sort(input_list)` and prints `sorted_list` -- undefined names
    # as written; confirm against the original source.
    print("Enter list to be sorted")
    A : Union[str, Any] = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    A : str = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
| 1
|
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Scheduler tests for `DDPMParallelScheduler`.

    Reconstructed from the obfuscated original: locals collapsed onto
    `__lowerCAmelCase` are restored to the names the bodies reference
    (`config`, `scheduler`, ...), the class attribute is restored to
    `scheduler_classes` (read via `self.scheduler_classes[0]` below), and the
    test methods get `test_*` names so unittest discovers them.
    """

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config, with keyword overrides applied on top."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(
            residual, timesteps.flatten(0, 1), samples.flatten(0, 1)
        )

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 57
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[Any] ="""CLIPImageProcessor"""
__UpperCAmelCase : Union[str, Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , __a=None , __a=None , **__a ):
__lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
__lowerCAmelCase = kwargs.pop("feature_extractor" )
__lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self , __a=None , __a=None , __a=None , **__a ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowerCAmelCase = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
__lowerCAmelCase = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
__lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@property
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 57
| 1
|
"""simple docstring"""
import numpy as np
from PIL import Image
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = np.array(_UpperCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# compute the shape of the output matrix
__lowerCAmelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
__lowerCAmelCase = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
__lowerCAmelCase = np.max(arr[i : i + size, j : j + size] )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
return updated_arr
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = np.array(_UpperCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError("The input array is not a square matrix" )
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
# compute the shape of the output matrix
__lowerCAmelCase = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape avgpool_shape
__lowerCAmelCase = np.zeros((avgpool_shape, avgpool_shape) )
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
__lowerCAmelCase = int(np.average(arr[i : i + size, j : j + size] ) )
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
__lowerCAmelCase = 0
__lowerCAmelCase = 0
return updated_arr
# Main Function
if __name__ == "__main__":
    # Run the module's doctests, then demo both pooling functions on an image.
    # NOTE(review): both pooling functions above are named `_lowerCamelCase`
    # and the opened image is bound to `A`, yet this block calls
    # `maxpooling`/`avgpooling` and reads `image` -- undefined names as
    # written; also "path_to_image" is a placeholder path. Confirm against the
    # original source before running.
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)
    # Loading the image
    A : str = Image.open("path_to_image")
    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 57
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
    """Empty placeholder class gated on onnxruntime availability.

    Exists only to exercise the `require_onnxruntime` decorator; it defines
    no tests or behavior of its own.
    """

    pass
| 57
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
A : Union[str, Any] = logging.get_logger(__name__)
def _lowerCamelCase ( videos ):
    """Coerce video input into a batch: List[List[frame]].

    Accepts a batch of videos, a single video (list of frames), or a single
    frame, and always returns the doubly-nested batch form.

    Fix: the obfuscated original kept the parameter name `_UpperCamelCase`
    while the body (and the error message) read `videos` -> NameError.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # already a batch of videos
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # a single video -> wrap into a batch
        return [videos]

    elif is_valid_image(videos):
        # a single frame -> wrap into a single-frame video inside a batch
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Image processor for video inputs (resize / center-crop / rescale+offset / normalize).

    Reconstructed from the obfuscated original: duplicate `__a` parameters
    (a SyntaxError) and locals/attributes collapsed onto `__lowerCAmelCase`
    are restored to the names the bodies reference; helper methods get the
    names `self.resize`/`self.center_crop`/`self.rescale`/`self.normalize`/
    `self._preprocess_image` that `_preprocess_image` and `preprocess` call,
    and the batching helper defined above as `_lowerCamelCase` is called by
    that name.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        offset=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        """Resize to `size["shortest_edge"]` (keeping aspect) or to exact (height, width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        """Rescale by `scale`; with `offset`, subtract scale/2 beforehand (shifts the output range)."""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Channel-wise normalize with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        """Apply the configured transforms to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one or more videos into a `BatchFeature` of pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # `_lowerCamelCase` is this module's make_batched helper (defined above).
        videos = _lowerCamelCase(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 57
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Min-heap over a `heap` list of keys and a parallel `positions` list of
    vertex ids, with `node_position[vertex]` tracking each vertex's index in
    the heap (required by Prim's algorithm for O(log n) decrease-key).

    NOTE(review): restored from obfuscated code whose class/method names and
    assignment targets were destroyed; the names below are exactly what the
    Prim's-algorithm caller in this file references.
    """

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently holding `vertex`."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that `vertex` now lives at heap index `pos`."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the key at index `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                # Swap keys and vertex ids, then fix the position index of both vertices.
                temp, temp_position = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_position
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift the key `val` at `index` up toward the root (decrease-key)."""
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: `val` becomes the new minimum.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place from an arbitrary `heap`/`positions` pair."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key; refill root with +inf."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Return the minimum-spanning-tree edges of a connected, undirected graph
    using Prim's algorithm with an indexed min-heap.

    `adjacency_list[v]` is a list of `[neighbor, weight]` pairs. Returns a list
    of `(parent, vertex)` tuples, one per non-root vertex.

    NOTE(review): local names and the function name restored; the
    `__main__` block below calls `prisms_algorithm` by this name.
    """
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read `edges_number` undirected weighted edges "u v w" from stdin and
    # print the MST edges. Names restored from obfuscated assignment targets.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
| 1
|
"""simple docstring"""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
A : Optional[List[str]] = None
A : Any = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
A : int = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class _UpperCamelCase :
    """Image feature backed by a `{"bytes": binary, "path": string}` Arrow struct,
    with helpers to encode examples to that struct, decode them back into PIL
    images, and cast/embed pyarrow storage arrays.

    NOTE(review): every method below is named `snake_case` and every class
    attribute `__UpperCAmelCase`, so each definition shadows the previous one —
    only the last of each survives on the class. Assignment targets were also
    lost (all `__lowerCAmelCase`), so several names read in the bodies (`value`,
    `bytes_`, `path`, `image`, `bytes_array`, `path_array`, `storage`) are
    undefined. This looks like mechanical renaming damage; code left
    byte-identical, documentation only.
    """
    # decode flag (original name presumably `decode`): when False, decoding raises.
    __UpperCAmelCase : bool =True
    # MIME-like type hint (original name presumably `mime_type` / `id`) — TODO confirm.
    __UpperCAmelCase : Optional[str] =None
    # Automatically constructed
    __UpperCAmelCase : ClassVar[str] ="PIL.Image.Image"
    __UpperCAmelCase : ClassVar[Any] =pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
    __UpperCAmelCase : str =field(default="""Image""" ,init=lowerCAmelCase__ ,repr=lowerCAmelCase__ )
    def __call__( self ):
        # Arrow storage type of this feature.
        return self.pa_type
    def snake_case ( self , __a ):
        """Encode one example (str path, bytes, np.ndarray, PIL image, or
        {"path","bytes"} dict) into a {"path","bytes"} dict.

        NOTE(review): `isinstance(__a, __a)` cannot be right — the original
        type arguments were lost in renaming; `value` is undefined here.
        """
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'." )
        if isinstance(__a , __a ):
            __lowerCAmelCase = np.array(__a )
        if isinstance(__a , __a ):
            return {"path": value, "bytes": None}
        elif isinstance(__a , __a ):
            return {"path": None, "bytes": value}
        elif isinstance(__a , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(__a )
        elif isinstance(__a , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(__a )
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def snake_case ( self , __a , __a=None ):
        """Decode a {"path","bytes"} dict into a loaded PIL image.

        Local paths are opened directly; other paths are streamed via `xopen`,
        resolving a per-repo auth token from `token_per_repo_id` when the path
        matches the Hub URL pattern.
        """
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'." )
        if token_per_repo_id is None:
            __lowerCAmelCase = {}
        __lowerCAmelCase , __lowerCAmelCase = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}." )
            else:
                if is_local_path(__a ):
                    __lowerCAmelCase = PIL.Image.open(__a )
                else:
                    # Strip any chained-URL prefix ("scheme::inner") before matching.
                    __lowerCAmelCase = path.split("::" )[-1]
                    try:
                        __lowerCAmelCase = string_to_dict(__a , config.HUB_DATASETS_URL )["repo_id"]
                        __lowerCAmelCase = token_per_repo_id.get(__a )
                    except ValueError:
                        __lowerCAmelCase = None
                    with xopen(__a , "rb" , use_auth_token=__a ) as f:
                        __lowerCAmelCase = BytesIO(f.read() )
                    __lowerCAmelCase = PIL.Image.open(bytes_ )
        else:
            __lowerCAmelCase = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def snake_case ( self ):
        """Flatten: return self when decodable, else the raw {"bytes","path"} schema."""
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("binary" ),
                "path": Value("string" ),
            }
        )
    def snake_case ( self , __a ):
        """Cast pyarrow `storage` (string / binary / struct / list) to this
        feature's {"bytes","path"} struct type."""
        if pa.types.is_string(storage.type ):
            __lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.binary() )
            __lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            __lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.string() )
            __lowerCAmelCase = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                __lowerCAmelCase = storage.field("bytes" )
            else:
                __lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                __lowerCAmelCase = storage.field("path" )
            else:
                __lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.string() )
            __lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # List storage: treat each sub-list as a raw array and PNG/TIFF-encode it.
            __lowerCAmelCase = pa.array(
                [encode_np_array(np.array(__a ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            __lowerCAmelCase = pa.array([None] * len(__a ) , type=pa.string() )
            __lowerCAmelCase = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(__a , self.pa_type )
    def snake_case ( self , __a ):
        """Embed: read each referenced path's bytes into the array so the data
        is self-contained; keep only the file's basename as `path`."""
        @no_op_if_value_is_null
        def path_to_bytes(__a ):
            with xopen(__a , "rb" ) as f:
                __lowerCAmelCase = f.read()
            return bytes_
        __lowerCAmelCase = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        __lowerCAmelCase = pa.array(
            [os.path.basename(__a ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        __lowerCAmelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(__a , self.pa_type )
def list_image_compression_formats():
    """Return the image formats Pillow can both open and save, cached in the
    module-level `_IMAGE_COMPRESSION_FORMATS` global.

    Fix: the obfuscated version declared `global _IMAGE_COMPRESSION_FORMATS`
    but assigned the result to a throwaway local, so the cache was never filled
    and the function returned None forever. Name restored: the class above
    calls `list_image_compression_formats()` directly.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()  # populate Pillow's OPEN/SAVE plugin registries
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image):
    """Serialize a PIL image to bytes, preferring its native format and
    falling back to PNG (common modes) or TIFF otherwise.

    NOTE(review): local names restored — the obfuscated body returned
    `buffer.getvalue()` with `buffer` never defined.
    """
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image):
    """Encode a PIL image as a {"path","bytes"} dict: keep only the path when
    the image came from a named file, otherwise serialize it to bytes."""
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array):
    """Encode a numpy array as a {"path","bytes"} image dict, downcasting its
    dtype to the nearest Pillow-compatible one when needed.

    Raises TypeError when no compatible dtype exists. Name restored: the
    feature class above calls `encode_np_array` directly.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def _lowerCamelCase(objs):
    """Convert a list of image-like objects (paths, numpy arrays, PIL images)
    into a list of {"path","bytes"} dicts, dispatching on the first non-null
    element's type; any other type is passed through unchanged.

    NOTE(review): local names restored from obfuscated targets; the dispatch
    types (`str`, `np.ndarray`, `PIL.Image.Image`) follow the original
    datasets implementation this was derived from — confirm upstream.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 57
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
# (old prefix, new prefix) pairs applied in order when renaming state-dict keys.
# Names restored: the conversion functions below reference `rename_keys_prefix`
# and `ACCEPTABLE_CHECKPOINTS` directly.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Checkpoint filenames this converter knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a checkpoint's state dict onto CPU.

    Fix: the obfuscated version assigned `torch.load(...)` to a throwaway
    local and then returned the undefined name `sd`. Name restored — the
    conversion function below calls `load_state_dict` directly.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Map an original VisualBERT state dict `d` onto the transformers naming
    scheme, dropping detector weights and adding position ids / decoder bias.

    NOTE(review): restored from an obfuscated signature with three identical
    parameter names (a SyntaxError); the position-ids key follows the upstream
    converter — confirm against transformers' VisualBERT conversion script.
    """
    new_d = OrderedDict()
    # VisualBERT buffers position ids rather than recomputing them.
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT `.th` checkpoint into a transformers
    model directory.

    Infers the task (pretraining / vqa / nlvr / multichoice) and the visual
    embedding size from the checkpoint filename, remaps the state dict, and
    saves the resulting model. Names restored from obfuscated targets; the
    `__main__` block calls `convert_visual_bert_checkpoint` by this name.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)
    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)
    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)
    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a .th checkpoint into a transformers model dir.
    # Names restored — the obfuscated version called `parser.add_argument` and
    # `args.*` on names that were never defined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config URLs for the released DistilBERT checkpoints.
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}
class _UpperCamelCase ( lowerCAmelCase__ ):
    """DistilBERT model configuration (mirrors transformers' DistilBertConfig).

    NOTE(review): the obfuscated `__init__` reused `__a` for every parameter
    (a SyntaxError) and the two class attributes shared one name, shadowing
    each other; restored to the names the body assignments reference.
    """

    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class _UpperCamelCase ( lowerCAmelCase__ ):
    """ONNX export configuration for DistilBERT.

    NOTE(review): property renamed from the obfuscated `snake_case` back to
    `inputs`, the accessor the ONNX export machinery reads, and the local
    `dynamic_axis` (read but never defined in the obfuscated body) restored.
    """

    @property
    def inputs(self):
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 57
|
"""simple docstring"""
class OverFlowError ( lowerCAmelCase__ ):
    """Raised when a bounded queue is full.

    NOTE(review): both exception classes were obfuscated to the same name
    `_UpperCamelCase` (the second shadowed the first) while the queue classes
    below raise `OverFlowError` / `UnderFlowError` by name; names restored.
    """
    pass


class UnderFlowError ( lowerCAmelCase__ ):
    """Raised when dequeueing from an empty queue."""
    pass
class FixedPriorityQueue:
    """FIFO queue with three fixed priority levels (0 = highest), each capped
    at 100 elements.

    NOTE(review): class/method names restored — the demo function below
    instantiates `FixedPriorityQueue` and calls `enqueue`/`dequeue`, while the
    obfuscated version named both methods `snake_case`, so only the last one
    survived on the class.
    """

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Append `data` to the queue for `priority` (0, 1 or 2)."""
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            # An out-of-range priority indexes past `self.queues`.
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Pop the oldest element of the highest non-empty priority."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    """Queue capped at 100 elements where the smallest element has the highest
    priority (dequeue returns and removes the minimum).

    NOTE(review): class/method names restored to match the demo function's
    call sites (`ElementPriorityQueue`, `enqueue`, `dequeue`).
    """

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """Demo FixedPriorityQueue: 9 enqueues, then 10 dequeues — the last
    dequeue hits an empty queue and raises UnderFlowError by design.

    Names restored: the `__main__` block calls `fixed_priority_queue()` and
    the obfuscated body used `fpq` without defining it.
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue():
    """Demo ElementPriorityQueue: 9 enqueues, then 10 dequeues — the last
    dequeue hits an empty queue and raises UnderFlowError by design.

    Names restored: the `__main__` block calls `element_priority_queue()` and
    the obfuscated body used `epq` without defining it.
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    # Run both priority-queue demos in sequence.
    fixed_priority_queue()
    element_priority_queue()
| 57
| 1
|
"""simple docstring"""
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored names: the config class below uses `logger` in its
# `max_position_embeddings` property.
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class _UpperCamelCase ( lowerCAmelCase__ ):
    """XLNet model configuration (mirrors transformers' XLNetConfig).

    NOTE(review): the obfuscated `__init__` reused `__a` for every parameter
    (a SyntaxError), the class attributes shared one name, and the
    `@max_position_embeddings.setter` decorator referenced a property that no
    longer existed; all names restored to what the bodies reference.
    """

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id
        if "use_cache" in kwargs:
            # Deprecated alias kept for backward compatibility.
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]
        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__lowerCAmelCase = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
    # Run this module's doctests.
    import doctest

    doctest.testmod()
| 57
| 1
|
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    """Return sinusoidal position/timestep embeddings of shape
    (len(timesteps), embedding_dim), half sin and half cos (order controlled
    by `flip_sin_to_cos`), with frequencies log-spaced between
    `min_timescale` and `max_timescale`.

    NOTE(review): signature and locals restored from an obfuscated version
    whose parameters all shared one name (a SyntaxError); the function name
    matches the `FlaxTimesteps.__call__` call site below.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class _UpperCamelCase ( nn.Module ):
    """Two-layer MLP (Dense -> silu -> Dense) applied to a timestep embedding.

    NOTE(review): dataclass field names restored — the obfuscated attributes
    both shadowed one name while `__call__` reads `self.time_embed_dim` and
    `self.dtype`.
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class _UpperCamelCase ( nn.Module ):
    """Flax module producing sinusoidal timestep embeddings of width `dim`.

    NOTE(review): dataclass field names restored to the attributes
    `__call__` reads (`dim`, `flip_sin_to_cos`, `freq_shift`).
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
| 57
|
"""simple docstring"""
def solution():
    """Project Euler 40: product of the 1st, 10th, 100th, ..., 1,000,000th
    digits of the Champernowne constant 0.123456789101112...

    Names restored: the `__main__` block calls `solution()`, and the
    obfuscated body used `constant` and `i` without defining them.
    """
    constant = []
    i = 1
    # Concatenating the first 1e6 integers yields well over 1e6 digits.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
    # Print the Project Euler 40 digit product.
    print(solution())
| 57
| 1
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
A : Any = logging.get_logger(__name__)
# Restored constant names: the tokenizer class below references
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES directly.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class _UpperCamelCase ( lowerCAmelCase__ ):
    """
    Byte-level-BPE "fast" Blenderbot tokenizer backed by the `tokenizers`
    library (mirrors the RoBERTa fast tokenizer plus Blenderbot specifics).

    NOTE(review): this block appears machine-mangled and will not run as-is:
    several `def`s share the name `snake_case` (later ones overwrite earlier
    ones in the class namespace), `__init__` declares duplicate `__a`
    parameters, many assignments target `__lowerCAmelCase` while later lines
    read other names, and `@mask_token.setter` references a name not defined
    in this scope. Code left byte-identical; compare against the upstream
    `BlenderbotTokenizerFast` before relying on any behavior noted here.
    """
    # Class-level configuration; names refer to this module's constants.
    __UpperCAmelCase : int =VOCAB_FILES_NAMES
    __UpperCAmelCase : str =PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : Union[str, Any] =["""input_ids""", """attention_mask"""]
    __UpperCAmelCase : List[str] =BlenderbotTokenizer
    # Mirrors RobertaTokenizerFast.__init__: after the base init, it syncs the
    # `add_prefix_space`/`trim_offsets` options into the backend tokenizer's
    # pre-tokenizer and post-processor components.
    def __init__( self , __a=None , __a=None , __a=None , __a="replace" , __a="<s>" , __a="</s>" , __a="</s>" , __a="<s>" , __a="<unk>" , __a="<pad>" , __a="<mask>" , __a=False , __a=True , **__a , ):
        super().__init__(
            __a , __a , tokenizer_file=__a , errors=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , trim_offsets=__a , **__a , )
        # NOTE(review): assignments below target `__lowerCAmelCase` while later
        # lines read `pre_tok_state` / `pre_tok_class` /
        # `tokenizer_component_instance` / `state` / `changes_to_apply` /
        # `component_class` — the original variable names were lost.
        __lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , __a ) != add_prefix_space:
            __lowerCAmelCase = getattr(__a , pre_tok_state.pop("type" ) )
            __lowerCAmelCase = add_prefix_space
            __lowerCAmelCase = pre_tok_class(**__a )
        __lowerCAmelCase = add_prefix_space
        __lowerCAmelCase = "post_processor"
        __lowerCAmelCase = getattr(self.backend_tokenizer , __a , __a )
        if tokenizer_component_instance:
            __lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                __lowerCAmelCase = tuple(state["sep"] )
            if "cls" in state:
                __lowerCAmelCase = tuple(state["cls"] )
            __lowerCAmelCase = False
            if state.get("add_prefix_space" , __a ) != add_prefix_space:
                __lowerCAmelCase = add_prefix_space
                __lowerCAmelCase = True
            if state.get("trim_offsets" , __a ) != trim_offsets:
                __lowerCAmelCase = trim_offsets
                __lowerCAmelCase = True
            if changes_to_apply:
                __lowerCAmelCase = getattr(__a , state.pop("type" ) )
                __lowerCAmelCase = component_class(**__a )
                setattr(self.backend_tokenizer , __a , __a )
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def snake_case ( self ):
        """Return the mask token as a string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
    # NOTE(review): `mask_token` is not bound in the class body at this point
    # (the property above is named `snake_case`), so this decorator raises
    # NameError when the class is created.
    @mask_token.setter
    def snake_case ( self , __a ):
        """Setter: plain strings become lstrip'ed AddedTokens (mask absorbs the leading space)."""
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else value
        __lowerCAmelCase = value
    # Apparently `_batch_encode_plus`: rejects pretokenized input unless
    # `add_prefix_space` was enabled at construction time.
    def snake_case ( self , *__a , **__a ):
        __lowerCAmelCase = kwargs.get("is_split_into_words" , __a )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*__a , **__a )
    # Apparently `_encode_plus`: same guard for a single input.
    def snake_case ( self , *__a , **__a ):
        __lowerCAmelCase = kwargs.get("is_split_into_words" , __a )
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*__a , **__a )
    # Apparently `save_vocabulary`: delegates to the backend tokenizer model.
    def snake_case ( self , __a , __a = None ):
        __lowerCAmelCase = self._tokenizer.model.save(__a , name=__a )
        return tuple(__a )
    # Apparently `create_token_type_ids_from_sequences`: Blenderbot does not use
    # token type ids, so the result is all zeros.
    def snake_case ( self , __a , __a = None ):
        __lowerCAmelCase = [self.sep_token_id]
        __lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    # Apparently `build_inputs_with_special_tokens`: just appends EOS.
    def snake_case ( self , __a , __a = None ):
        return token_ids_a + [self.eos_token_id]
    # Apparently `_build_conversation_input_ids`: flattens a Conversation into
    # ids, truncating from the left to fit `model_max_length`.
    def snake_case ( self , __a ):
        __lowerCAmelCase = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text )
            else:
                # Generated responses should contain them already.
                inputs.append(__a )
        __lowerCAmelCase = " ".join(__a )
        __lowerCAmelCase = self.encode(__a )
        if len(__a ) > self.model_max_length:
            __lowerCAmelCase = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." )
        return input_ids
| 57
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures for the tests below: the small Lena image (BGR as loaded by
# cv2.imread) and its grayscale conversion. Names reconstructed from the read
# sites in the test functions (`img`, `gray`).
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    """Negative transform of Lena should yield at least one nonzero pixel."""
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    """Contrast change should return a PIL RGB image of the same size."""
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")
def test_gen_gaussian_kernel():
    """A Gaussian kernel must be strictly positive everywhere."""
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    """Canny edge detection should fire on at least one pixel of Lena."""
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    """Gaussian filtering of the grayscale image should be nonzero everywhere."""
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    """Laplace convolution should produce at least one nonzero response."""
    # Laplace-like 3x3 kernel.
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    # `uinta` is this file's (mangled) numpy uint8 import name.
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()
def test_median_filter():
    """Median filtering should keep some nonzero pixels."""
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    """Sobel filter returns (gradient, angle); both should be non-trivial."""
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    """Sepia transform should be nonzero for every pixel of Lena."""
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    """Burkes dithering should produce a non-empty output image."""
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    """Nearest-neighbour resize to 400x200 should produce a non-empty output."""
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    """LBP helpers should return neighbours and a non-empty LBP image."""
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center_pixel = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center_pixel)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 57
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Lazy-import structure: submodule name -> list of public symbols. The
# tokenizer is only exposed when sentencepiece is installed.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import structure: submodule name -> list of public symbols.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

# Modeling code is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
A : Optional[int] = "\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n"
A : int = "\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, 
average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n"
A : Optional[Any] = "\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _UpperCamelCase(datasets.Metric):
    """F1 metric backed by `sklearn.metrics.f1_score`."""

    # datasets.Metric expects `_info` / `_compute`; the mangled `snake_case`
    # names collided and never matched the base-class hooks.
    def _info(self):
        """Declare the metric schema; sequences of ints for the multilabel config."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute F1; returns {"f1": float} for scalar scores, else the per-class array."""
        # NOTE(review): `fa_score` is this file's (mangled) import of
        # sklearn.metrics.f1_score; sklearn expects (y_true, y_pred) order.
        score = fa_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
| 57
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)  # module-level logger (conventional name)
@dataclass
class _UpperCamelCase(TrainingArguments):
    """
    Extra command-line arguments for seq2seq fine-tuning, layered on top of the
    base `TrainingArguments` (label smoothing, generation-based eval, dropout
    overrides and LR-scheduler choice forwarded into `model.config`).

    NOTE(review): field names reconstructed from the upstream
    Seq2SeqTrainingArguments of transformers' legacy seq2seq examples — the
    original file mangled every field to one colliding name and used an
    undefined base class. Confirm the names against the example scripts.
    """

    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 57
| 1
|
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    """Split a sklearn-style dataset mapping into (features, labels).

    Args:
        data: mapping with "data" (features) and "target" (labels) entries.

    Returns:
        tuple: (data["data"], data["target"]).
    """
    return (data["data"], data["target"])
def xgboost(features, target):
    """Fit and return an XGBClassifier on the given training data.

    Args:
        features: training feature matrix.
        target: training labels.

    Returns:
        the fitted XGBClassifier.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main() -> None:
    """
    Train an XGBoost classifier on the iris dataset and display its
    normalized confusion matrix (train/test split of 75/25).
    """
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgb_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgb_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
# Script entry point: run the module's doctests, then the demo pipeline.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
    # NOTE(review): `main` must exist at module level; in this file the demo
    # function's def name appears mangled (`_lowerCamelCase`) — confirm.
    main()
| 57
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"

# Maps a pattern name to (compiled regex locating the version string,
# replacement template in which "VERSION" is substituted with the target).
REPLACE_PATTERNS = {
    "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
    "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}

# Files (outside examples/) whose version strings are rewritten on release.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string found by REPLACE_PATTERNS[pattern] in `fname`.

    Args:
        fname: path of the file to update in place.
        version: new version string to insert.
        pattern: key into REPLACE_PATTERNS selecting (regex, replacement).
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the pinned minimum version in every example script.

    Args:
        version: new version string to insert.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded.

    Args:
        version: new version string.
        patch: when True, skip the (slow, unnecessary) examples update.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with stable links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version out of the main __init__ and parse it.

    Returns:
        packaging.version.Version: the currently declared version.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the release preparation: bump versions and clean the README.

    Args:
        patch: when True, bump the micro version instead of the minor one.

    Raises:
        ValueError: if asked for a patch release while on a dev version.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """Do all the post-release work: move the version to the next dev cycle."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI: default is the pre-release bump; --post_release moves to next dev cycle.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        # A patch release needs no post-release version bump.
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)  # module-level logger (conventional name)
def get_dpt_config(checkpoint_url):
    """
    Build a DPTConfig (hybrid backbone) plus the expected output shape for the
    checkpoint identified by `checkpoint_url`.

    Returns:
        (DPTConfig, tuple | list): configuration and expected output shape.

    NOTE(review): config attribute names reconstructed from the upstream
    DPT-hybrid conversion script — confirm against `DPTConfig`.
    """
    config = DPTConfig(embedding_type="hybrid")
    expected_shape = (1, 384, 384)  # default; refined by the branches below

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # NOTE(review): the original read `if "nyu" or "midas" in checkpoint_url:`,
    # which is always True; the membership test is applied per substring here.
    # All released hybrid checkpoints contain one of these markers.
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        # ADE20K id -> label mapping is fetched from the hub.
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    """Drop checkpoint keys that have no counterpart in the HF model (in place).

    Args:
        state_dict: mutable mapping of checkpoint tensors; modified in place.
    """
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        # Missing keys are tolerated (pop with default).
        state_dict.pop(k, None)
def rename_key(name):
    """
    Translate one key of the original DPT-hybrid checkpoint into the
    corresponding HF `DPT*` state-dict key.

    The order of the replacements is significant: earlier rewrites feed later
    substring checks (e.g. "scratch" -> "neck" before the refinenet logic).
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    """Split each layer's fused timm qkv projection into separate q/k/v entries.

    Args:
        state_dict: renamed checkpoint dict; modified in place.
        config: model config providing `num_hidden_layers` and `hidden_size`.

    NOTE(review): in the mangled original, the q/k/v writes were collapsed
    into a discarded local, making the function a no-op; target keys restored
    from the HF DPT attention module layout.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO cats image used to sanity-check model outputs.

    Returns:
        PIL.Image.Image: the downloaded test image.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """
    Copy/paste/tweak a DPT-hybrid checkpoint's weights into the HF design,
    then optionally visualize the prediction, save locally, and push to hub.

    NOTE(review): parameter names reconstructed; the argparse block is outside
    this view — confirm the order against the CLI definition. `checkpoint_url`
    is used both to pick the config and as the `torch.load` target, so it is
    expected to be a local file path at this point.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model: segmentation head for ADE checkpoints, depth otherwise
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    # CLI entry point for the DPT checkpoint conversion.  The mangled original
    # bound the parser and parsed args to a throwaway `A`, so the subsequent
    # `parser.add_argument` / `args.*` reads raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Blenderbot subpackage: maps each submodule to
# the public names it exposes.  Optional backends (tokenizers / torch / tf /
# flax) are only registered when importable.  The mangled original bound every
# symbol list to a throwaway `A`, while the _LazyModule call below reads
# `_import_structure` — restored here.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Re-export the concrete symbols for static type checkers only; at runtime
    # the module is replaced by a lazy proxy in the else branch below.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so heavy backends are imported on first attribute
    # access.  The mangled original bound the _LazyModule to a throwaway `A`
    # (leaving `import sys` unused), so the proxy never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
    """
    Empty placeholder test case: its only purpose is to exercise the
    ``require_onnxruntime`` decorator (which presumably skips the case when
    the onnxruntime backend is unavailable — confirm against the decorator).
    """

    pass
| 57
|
"""simple docstring"""
def solution(n=6008_5147_5143):
    """
    Return the largest prime factor of ``n`` (Project Euler problem 3).

    Repeatedly divides out each factor it finds, so the final divisor used is
    the largest prime factor.  The mangled original read an undefined local
    ``n``; the in-file caller (``solution()``) grounds the restored name.

    :raises TypeError: if ``n`` is not an int and cannot be cast to one
    :raises ValueError: if ``n`` is not positive
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # advance i to the next divisor of n
        while n % i != 0:
            i += 1
        ans = i
        # strip this prime factor out completely before moving on
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    # NOTE(review): requires a module-level `solution` callable; in the mangled
    # file the def above was renamed, so this raises NameError until restored.
    print(f'''{solution() = }''')
| 57
| 1
|
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _UpperCamelCase ( nn.Module ):
    """
    Dual-transformer block: two Transformer2D models run on different slices of
    the condition tokens and their outputs are blended by ``mix_ratio``.

    NOTE(review): identifiers in this block were machine-mangled — every
    constructor/forward parameter is named ``__a`` (a duplicate-argument
    SyntaxError) and local/attribute assignment targets were collapsed to
    ``__lowerCAmelCase`` while the bodies still read the intended names
    (``input_states``, ``tokens_start``, ``self.transformers``...).  The code
    is kept byte-identical and only documented; restoring the identifiers must
    be checked against the upstream source.
    """

    def __init__( self , __a = 16 , __a = 88 , __a = None , __a = 1 , __a = 0.0 , __a = 32 , __a = None , __a = False , __a = None , __a = None , __a = "geglu" , __a = None , ):
        super().__init__()
        # two parallel transformer branches (intended target: self.transformers)
        __lowerCAmelCase = nn.ModuleList(
            [
                TransformeraDModel(
                    num_attention_heads=__a , attention_head_dim=__a , in_channels=__a , num_layers=__a , dropout=__a , norm_num_groups=__a , cross_attention_dim=__a , attention_bias=__a , sample_size=__a , num_vector_embeds=__a , activation_fn=__a , num_embeds_ada_norm=__a , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        __lowerCAmelCase = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        __lowerCAmelCase = [77, 2_57]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        __lowerCAmelCase = [1, 0]
    def snake_case ( self , __a , __a , __a=None , __a=None , __a=None , __a = True , ):
        __lowerCAmelCase = hidden_states
        __lowerCAmelCase = []
        __lowerCAmelCase = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            __lowerCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            __lowerCAmelCase = self.transformer_index_for_condition[i]
            __lowerCAmelCase = self.transformers[transformer_index](
                __a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , return_dict=__a , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        # blend the two branch outputs, then add the residual input
        __lowerCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        __lowerCAmelCase = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=__a )
| 57
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    """
    Common test harness for model configuration classes: checks common
    properties, JSON round-trips, save/load round-trips and init kwargs.

    NOTE(review): this block was machine-mangled — every positional parameter
    is named ``__a`` (a duplicate-argument SyntaxError in ``__init__``), all
    methods are named ``snake_case`` (later defs shadow earlier ones) and
    assignment targets were collapsed to ``__lowerCAmelCase`` while the bodies
    still read the intended names (``self.config_class``, ``config_first``...).
    Kept byte-identical and only documented; restore against upstream.
    """

    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties
    # Checks the common properties exist as getters, setters and init kwargs.
    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    # Round-trips the config through to_json_string.
    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )
    # Round-trips the config through a JSON file on disk.
    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    # Round-trips the config through save_pretrained/from_pretrained.
    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    # Same as above, but saving into / loading from a subfolder.
    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    # Checks num_labels drives the id/label mapping sizes.
    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )
    # Non-composite configs must be constructible with no arguments.
    def snake_case ( self ):
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )
    # Verifies common kwargs are honoured by the constructor.
    def snake_case ( self ):
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
    # Entry point: runs the whole common-config test battery.
    def snake_case ( self ):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
# Metadata for the 3D projection/rotation utilities below.
A : Any = "2020.9.26"  # presumably __version__ before identifier mangling
A : List[str] = "xcodz-dot, cclaus, dhruvmanila"  # presumably __author__
def convert_to_ad(x, y, z, scale, distance):
    """
    Convert a 3D point ``(x, y, z)`` to 2D via a simple perspective projection.

    The mangled original named all five parameters identically (a SyntaxError);
    the names are restored from the body's own reads, and the in-file caller
    grounds the function name.

    :raises TypeError: if any argument is not a float or int
    :return: the projected ``(x, y)`` pair
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f"Input values must either be float or int: {list(locals().values() )}"
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def rotate(x, y, z, axis, angle):
    """
    Rotate the point ``(x, y, z)`` about the given axis ("x", "y" or "z") by
    ``angle``.  Names are restored from the mangled original's own body reads;
    assignment targets follow from the per-branch return structure.

    :raises TypeError: if ``axis`` is not a str, or any coordinate/angle is
        neither float nor int
    :raises ValueError: if ``axis`` is not one of "x", "y", "z"
    :return: the rotated ``(new_x, new_y, new_z)`` triple
    """
    if not isinstance(axis, str):
        raise TypeError("Axis must be a str")
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            "Input values except axis must either be float or int: "
            f"{list(input_variables.values() )}"
        )
        raise TypeError(msg)
    # NOTE: reproduces the original's unusual degree->radian conversion
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("not a valid axis, choose one of 'x', 'y', 'z'")
    return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): expects module-level `convert_to_ad` and `rotate`; in the
    # mangled file the defs above were renamed, so these raise NameError until
    # the intended names are restored.
    print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''')
    print(f'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
| 57
|
"""simple docstring"""
# Pinned dependency table: package name -> pip requirement specifier.
# NOTE(review): upstream binds this kind of table to a descriptive name
# (e.g. `deps`); here it was mangled to the generic `A` — confirm consumers.
A : int = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 1
|
"""simple docstring"""
def _lowerCamelCase(word):
    """
    Return ``word`` with ASCII lowercase letters converted to uppercase; every
    other character (digits, punctuation, non-ASCII) is left untouched.

    The mangled original applied ``ord`` to the whole string and iterated an
    undefined ``word``; the parameter name is restored from the body's read.
    """
    # ord(char) - 32 maps an ASCII lowercase code point to its uppercase pair
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 57
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    """
    Byte-level (ByT5-style) tokenizer: every UTF-8 byte is a token, preceded by
    a small block of special tokens (pad/eos/unk) with optional
    ``<extra_id_*>`` sentinels at the end of the vocabulary.

    NOTE(review): this block was machine-mangled — every positional parameter
    is named ``__a`` (a duplicate-argument SyntaxError in ``__init__``), all
    methods are named ``snake_case`` (later defs shadow earlier ones) and
    assignment targets were collapsed to ``__lowerCAmelCase`` while bodies
    still read the intended names (``extra_ids``, ``token_ids``...).  Kept
    byte-identical and only documented; restore against the upstream source.
    """

    __UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
    def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        super().__init__(
            eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = 2**8  # utf is 8 bits
        # define special tokens dict
        __lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCAmelCase = len(self.special_tokens_encoder )
        __lowerCAmelCase = len(__a )
        for i, token in enumerate(__a ):
            __lowerCAmelCase = self.vocab_size + i - n
        __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
    # total vocabulary: 256 byte values + special tokens + extra-id sentinels
    @property
    def snake_case ( self ):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    # mask marking special tokens (1) vs sequence tokens (0)
    def snake_case ( self , __a , __a = None , __a = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a )) + [1]
        return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
    # append EOS unless the sequence already ends with it (then just warn)
    def snake_case ( self , __a ):
        if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    # token_type_ids are all zeros for this tokenizer
    def snake_case ( self , __a , __a = None ):
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    # build model inputs by appending EOS to each sequence
    def snake_case ( self , __a , __a = None ):
        __lowerCAmelCase = self._add_eos_if_not_present(__a )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(__a )
            return token_ids_a + token_ids_a
    # tokenize: one token per UTF-8 byte
    def snake_case ( self , __a ):
        __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
        return tokens
    # token -> id (specials first, then raw byte offset)
    def snake_case ( self , __a ):
        if token in self.special_tokens_encoder:
            __lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(__a ) != 1:
            __lowerCAmelCase = self.unk_token_id
        else:
            __lowerCAmelCase = ord(__a ) + self._num_special_tokens
        return token_id
    # id -> token
    def snake_case ( self , __a ):
        if index in self.special_tokens_decoder:
            __lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            __lowerCAmelCase = chr(index - self._num_special_tokens )
        return token
    # join byte tokens back into a string, decoding UTF-8 leniently
    def snake_case ( self , __a ):
        __lowerCAmelCase = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            else:
                __lowerCAmelCase = bytes([ord(__a )] )
            bstring += tok_string
        __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
        return string
    # byte-level vocab: nothing to save to disk
    def snake_case ( self , __a , __a = None ):
        return ()
| 57
| 1
|
"""simple docstring"""
def _lowerCamelCase(number_of_steps):
    """
    Return the number of distinct ways to climb ``number_of_steps`` stairs
    taking 1 or 2 steps at a time (Fibonacci recurrence).

    The mangled original read an undefined ``number_of_steps`` / ``current`` /
    ``previous``; names are restored from the body's own reads.  Input
    validation raises instead of using ``assert`` (which is stripped under -O).

    :raises ValueError: if ``number_of_steps`` is not a positive integer
    """
    if not isinstance(number_of_steps, int) or number_of_steps <= 0:
        raise ValueError(
            f"number_of_steps needs to be positive integer, your input {number_of_steps}"
        )
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 57
|
"""simple docstring"""
import numpy
# List of input, output pairs
# List of input, output pairs.  The mangled original bound every constant to a
# throwaway `A` while the functions below (and even `len(train_data)` on the
# next lines) read the intended names — restored here.
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
# theta0 (bias) followed by one weight per feature
parameter_vector = [2, 4, 1, 5]
# number of training examples
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no, data_set="train"):
    """
    Signed error (hypothesis output minus actual output) for one example of the
    chosen data set.  Name and parameters restored from the in-file call sites
    (the mangled def duplicated its parameter names — a SyntaxError).
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _hypothesis_value(data_input_tuple):
    """
    Evaluate the linear hypothesis theta0 + sum_i theta_{i+1} * x_i for one
    input tuple, reading the weights from the module-level ``parameter_vector``.
    """
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    # bias / intercept term
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no, data_set):
    """
    Actual (target) output of the example from the train or test data.

    :return: the recorded output value, or None for an unknown data set
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no, data_set):
    """
    Hypothesis value for the example's input tuple from the train or test data.

    :return: the hypothesis value, or None for an unknown data set
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    """
    Sum over the first ``end`` training examples of the error, multiplied by
    feature ``index`` of each example unless ``index == -1`` (the bias term).
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index):
    """
    Cost derivative w.r.t. parameter ``index`` (bias when ``index == -1``):
    the summed error terms averaged over the ``m`` training examples.
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def run_gradient_descent():
    """
    Batch gradient descent over the module-level ``parameter_vector`` until the
    update falls within the absolute error limit; prints the iteration count.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # i - 1 == -1 selects the bias-term derivative
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def test_gradient_descent():
    """Print actual vs. hypothesis outputs for every example in the test set."""
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
    # NOTE(review): expects module-level `run_gradient_descent` /
    # `test_gradient_descent`; in the mangled file the defs above were renamed,
    # so this raises NameError until the intended names are restored.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 1
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
# Maps a location key to (regex that finds the version string, replacement
# template where VERSION is substituted).  The mangled original bound these to
# a throwaway `A` while the functions below read the intended names.
REPLACE_PATTERNS = {
    "examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
    "doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
# Files whose version string must be kept in sync on release.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """
    Rewrite the version string matched by ``REPLACE_PATTERNS[pattern]`` in
    ``fname`` to ``version`` (in place).  Name and parameter order restored
    from the in-file call sites; the mangled def duplicated its parameters.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """
    Update the ``check_min_version`` pin to ``version`` in every actively
    maintained example script under ``PATH_TO_EXAMPLES``.
    """
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """
    Update ``version`` in all tracked files, plus the example scripts unless
    this is a patch release.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """
    In the README model list, replace links to the main-branch docs by links to
    the stable docs (rewrites ``README_FILE`` in place).
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read and parse the current version from the main ``__init__`` file."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """
    Pre-release steps: compute the next version (minor bump, or micro bump for
    a patch), confirm with the user, update all files and clean the README.

    :raises ValueError: when asked for a patch release from a dev version
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def post_release_work():
    """
    Post-release steps: bump to the next ``.dev0`` version (confirmed with the
    user), update all files and clean the README.
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point for the release helper.  The mangled original bound the
    # parser and parsed args to a throwaway `A`, so the subsequent
    # `parser.add_argument` / `args.*` reads raised NameError.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__lowerCAmelCase = {
"do_resize": True,
"size": {"height": 2_24, "width": 2_24},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
"do_convert_rgb": True,
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __a )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__a , __a )
def snake_case ( self , **__a ):
return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , **__a ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , **__a ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a )
__lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
__lowerCAmelCase = processor(text=__a )
__lowerCAmelCase = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__a )
__lowerCAmelCase = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase(lowerCAmelCase__):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer
    (AltCLIP-style): forwards text to the tokenizer, images to the image
    processor, and merges the results into one encoding.
    """

    # Attribute names are part of the ProcessorMixin contract; the previous
    # mangled `__UpperCAmelCase` names were never read by the mixin.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Fix: the original had two parameters both named `__a` (a SyntaxError)
        # and never bound `feature_extractor`/`image_processor` locals.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns the tokenizer encoding (with `pixel_values` merged in when both
        inputs are given) or a BatchEncoding of image features only.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both components' input names, order-preserving and deduplicated.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57
|
"""simple docstring"""
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a square matrix of consecutive integers starting at 1.

    Non-positive or negative sizes fall back via abs(); 0 falls back to 4.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    """Rotate the matrix 270 degrees counterclockwise."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    """Return a new matrix with rows and columns swapped."""
    return [list(row) for row in zip(*matrix)]


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with the row order reversed."""
    return matrix[::-1]


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    """Return the matrix with each row reversed."""
    return [row[::-1] for row in matrix]


def print_matrix(matrix: list[list[int]]) -> None:
    """Print the matrix one row per line, values space-separated."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
| 57
| 1
|
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase(lowerCAmelCase__):
    """Safety checker: runs a CLIP vision tower plus two linear probe heads to
    flag NSFW and watermarked images, blacking out flagged images in place.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config):
        # Fix: locals were mangled (`__lowerCAmelCase = ...`) so the submodules
        # were never attached to `self`.
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)  # NSFW probe head
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)  # watermark probe head

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        """Return (images, nsfw_detected, watermark_detected).

        Flagged entries of `images` are replaced with all-zero arrays.
        NOTE(review): `images` is assumed to be an indexable batch of numpy
        arrays (per the `np.zeros(images[idx].shape)` usage) — confirm at call site.
        """
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black out flagged image

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed." )
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)  # black out flagged image

        return images, nsfw_detected, watermark_detected
| 57
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase(unittest.TestCase):
    """Pipeline tests for `text2text-generation` (encoder-decoder models)."""

    # Fix: both attributes were mangled to the same name, shadowing each other.
    # The pipeline test mixin reads these to select candidate architectures.
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build the pipeline under test plus example inputs."""
        generator = TextaTextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 57
| 1
|
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
    '''Test suite for the Wav2Vec2 phoneme CTC tokenizer: phonemization,
    (batch) decoding, word-delimiter handling, added/special tokens, char
    offsets, and vocab growth.

    NOTE(review): assignment targets throughout this class appear
    machine-mangled (`__lowerCAmelCase` / `__a`): many names referenced later
    (e.g. `tokenizer`, `sample_ids`, `outputs`) are never bound. Code is left
    byte-identical pending regeneration from the upstream source.
    '''
    __UpperCAmelCase : List[str] =WavaVecaPhonemeCTCTokenizer
    __UpperCAmelCase : Union[str, Any] =False
    def snake_case ( self ):
        # setUp: write a phoneme vocabulary + special-token map into the
        # temp fixture directory used by get_tokenizer-style helpers.
        super().setUp()
        __lowerCAmelCase = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" " )
        __lowerCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
        __lowerCAmelCase = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(__a ) + "\n" )
    def snake_case ( self , __a , __a=False , __a=20 , __a=5 ):
        # Helper: produce a (text, ids) pair whose ids decode consistently,
        # bounded between min_length and max_length tokens.
        __lowerCAmelCase = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=__a )) for i in range(len(__a ) )]
        __lowerCAmelCase = list(filter(lambda __a : [t[0]] == tokenizer.encode(t[1] , do_phonemize=__a ) , __a ) )
        if max_length is not None and len(__a ) > max_length:
            __lowerCAmelCase = toks[:max_length]
        if min_length is not None and len(__a ) < min_length and len(__a ) > 0:
            while len(__a ) < min_length:
                __lowerCAmelCase = toks + toks
        # toks_str = [t[1] for t in toks]
        __lowerCAmelCase = [t[0] for t in toks]
        # Ensure consistency
        __lowerCAmelCase = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
        if " " not in output_txt and len(__a ) > 1:
            __lowerCAmelCase = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__a )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__a )
            )
        if with_prefix_space:
            __lowerCAmelCase = " " + output_txt
        __lowerCAmelCase = tokenizer.encode(__a , add_special_tokens=__a )
        return output_txt, output_ids
    def snake_case ( self , **__a ):
        # Tokenizer factory: load from the fixture dir with special tokens applied.
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self ):
        # Added tokens must be appended after the base vocab and encode as single ids.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        # check adding a single token
        tokenizer.add_tokens("xxx" )
        __lowerCAmelCase = tokenizer("m xxx ɪ" , do_phonemize=__a ).input_ids
        self.assertEqual(__a , [13, 3_92, 17] )  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
        __lowerCAmelCase = tokenizer("m aaa ɪ ccc" , do_phonemize=__a ).input_ids
        self.assertEqual(__a , [13, 3_93, 17, 3_95] )  # aaa and ccc should be after xxx and 2 after aaa
        __lowerCAmelCase = tokenizer("maɪ c" , do_phonemize=__a ).input_ids
        self.assertEqual(__a , [3, 2_00] )  # mai should be <unk> (=3)
    def snake_case ( self ):
        # phonemize() must produce the expected en-us espeak phoneme string.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
        self.assertEqual(__a , "h ə l oʊ h aʊ ɑːɹ j uː" )
    def snake_case ( self ):
        # Encoding raw text must equal encoding the pre-phonemized string.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(__a ).input_ids , tokenizer(__a , do_phonemize=__a ).input_ids )
    def snake_case ( self ):
        # decode(encode(x)) must round-trip the phonemized string.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
        __lowerCAmelCase = tokenizer.decode(tokenizer(__a ).input_ids )
        self.assertEqual(__a , __a )
    def snake_case ( self ):
        # batch_decode must agree with per-sample decode (pad tokens dropped).
        __lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        __lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        __lowerCAmelCase = tokenizer.decode(sample_ids[0] )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertEqual(__a , batch_tokens[0] )
        self.assertEqual(__a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
    def snake_case ( self ):
        # With a word delimiter token, phonemize appends "|" between words.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
        self.assertEqual(__a , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
    def snake_case ( self ):
        # Word-delimited encoding must match encoding the pre-phonemized string.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(__a ).input_ids , tokenizer(__a , do_phonemize=__a ).input_ids )
    def snake_case ( self ):
        # Decode with and without filtering the word delimiter token.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        __lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        __lowerCAmelCase = tokenizer.decode(sample_ids[0] )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertEqual(__a , batch_tokens[0] )
        self.assertEqual(__a , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
        # decode with no word_del_token filter
        __lowerCAmelCase = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a , filter_word_delimiter_token=__a )
        self.assertEqual(__a , batch_tokens[0] )
        self.assertEqual(__a , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
    def snake_case ( self ):
        # Round-trip with the delimiter filtered out of the decoded text.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
        __lowerCAmelCase = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
        self.assertEqual(__a , __a )
    def snake_case ( self ):
        # Round-trip keeping the delimiter, then strip it for comparison.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer.phonemize(__a , phonemizer_lang="en-us" )
        __lowerCAmelCase = tokenizer.decode(tokenizer(__a ).input_ids , filter_word_delimiter_token=__a )
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , __a )
    def snake_case ( self ):
        # Different phonemizer languages must yield different encodings/decodings.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=__a )
        __lowerCAmelCase = "Hello how are you"
        __lowerCAmelCase = tokenizer(__a , phonemizer_lang="en-us" ).input_ids
        __lowerCAmelCase = tokenizer(__a , phonemizer_lang="fr-fr" ).input_ids
        self.assertNotEqual(__a , __a )
        __lowerCAmelCase = tokenizer.decode(__a )
        __lowerCAmelCase = tokenizer.decode(__a )
        self.assertEqual(__a , "h ə l oʊ h aʊ ɑːɹ j uː" )
        self.assertEqual(__a , "ɛ l o h aʊ a ʁ j u" )
    def snake_case ( self ):
        # Tokenization must be case-insensitive (phonemizer lower-cases input).
        __lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        __lowerCAmelCase = "Hello how Are you"
        __lowerCAmelCase = "hello how are you"
        __lowerCAmelCase = tokenizer(__a ).input_ids
        __lowerCAmelCase = tokenizer(__a ).input_ids
        self.assertEqual(__a , __a )
    def snake_case ( self ):
        # Added regular + special tokens must survive CTC-style batch decoding.
        __lowerCAmelCase = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        tokenizer.add_tokens(["!", "?"] )
        tokenizer.add_special_tokens({"cls_token": "$$$"} )
        # fmt: off
        __lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 3_92, 3_92, 3_93, 3_92, 3_92, 3_93, 3_94, 3_94],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 3_94, 3_94],
        ]
        # fmt: on
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertEqual(__a , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
    @staticmethod
    def snake_case ( __a , __a ):
        # Extract one field (e.g. "char", "start_offset") from a list of offset dicts.
        __lowerCAmelCase = [d[key] for d in offsets]
        return retrieved_list
    def snake_case ( self ):
        # Char offsets: check output keys, ordering, and exact offset values.
        __lowerCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        __lowerCAmelCase = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        __lowerCAmelCase = tokenizer.decode(__a , output_char_offsets=__a , filter_word_delimiter_token=__a )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue("text" in outputs )
        self.assertTrue("char_offsets" in outputs )
        self.assertTrue(isinstance(__a , __a ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    def snake_case ( self ):
        # batch_decode with offsets must match per-sample decode with offsets.
        __lowerCAmelCase = self.get_tokenizer(word_delimiter_token="|" )
        def check_list_tuples_equal(__a , __a ):
            self.assertTrue(isinstance(__a , __a ) )
            self.assertTrue(isinstance(outputs_list[0] , __a ) )
            # transform list to ModelOutput
            __lowerCAmelCase = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
            def recursive_check(__a , __a ):
                if isinstance(__a , __a ):
                    [recursive_check(__a , __a ) for la, la in zip(__a , __a )]
                    self.assertEqual(__a , __a )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
        # fmt: off
        __lowerCAmelCase = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        __lowerCAmelCase = tokenizer.batch_decode(__a , output_char_offsets=__a )
        __lowerCAmelCase = [tokenizer.decode(__a , output_char_offsets=__a ) for ids in sample_ids]
        check_list_tuples_equal(__a , __a )
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
    def snake_case ( self ):
        pass
    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
    def snake_case ( self ):
        pass
    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
    def snake_case ( self ):
        pass
    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
    def snake_case ( self ):
        pass
    def snake_case ( self ):
        # add_tokens / add_special_tokens must grow the vocab and encode new tokens.
        __lowerCAmelCase = self.get_tokenizers(do_lower_case=__a )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                __lowerCAmelCase = tokenizer.vocab_size
                __lowerCAmelCase = len(__a )
                self.assertNotEqual(__a , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                __lowerCAmelCase = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                __lowerCAmelCase = tokenizer.add_tokens(__a )
                __lowerCAmelCase = tokenizer.vocab_size
                __lowerCAmelCase = len(__a )
                self.assertNotEqual(__a , 0 )
                self.assertEqual(__a , __a )
                self.assertEqual(__a , len(__a ) )
                self.assertEqual(__a , all_size + len(__a ) )
                __lowerCAmelCase = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__a )
                self.assertGreaterEqual(len(__a ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                __lowerCAmelCase = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                __lowerCAmelCase = tokenizer.add_special_tokens(__a )
                __lowerCAmelCase = tokenizer.vocab_size
                __lowerCAmelCase = len(__a )
                self.assertNotEqual(__a , 0 )
                self.assertEqual(__a , __a )
                self.assertEqual(__a , len(__a ) )
                self.assertEqual(__a , all_size_a + len(__a ) )
                __lowerCAmelCase = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__a )
                self.assertGreaterEqual(len(__a ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def snake_case ( self ):
        pass
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def snake_case ( self ):
        pass
    def snake_case ( self ):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        __lowerCAmelCase = self.get_tokenizers(fast=__a , do_lower_case=__a )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                __lowerCAmelCase = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                __lowerCAmelCase = tokenizer.convert_tokens_to_string(__a )
                self.assertIsInstance(output["text"] , __a )
| 57
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """Minimal Lightning wrapper matching the checkpoint layout: a base
    Longformer plus a 2-label QA output head.

    Name restored: the conversion function below instantiates `LightningModel`.
    """

    def __init__(self, model):
        super().__init__()
        # Fix: attributes were mangled (`__lowerCAmelCase = ...`) and never
        # attached to `self`, so load_state_dict / weight transfer would fail.
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only to satisfy Lightning's abstract interface; unused here
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path
):
    """Convert a PyTorch Lightning Longformer-QA checkpoint into a
    LongformerForQuestionAnswering model saved at `pytorch_dump_folder_path`.

    Fix: the original signature declared the same mangled name three times
    (a SyntaxError) and never bound its locals.
    """
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # Fix: the parser/args variables were mangled to `A`, so the
    # `parser.add_argument` / `args.*` references below never resolved.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for BlenderbotSmall: submodules are registered here and
# only imported when first accessed, gated on the available optional backends.
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime uses the lazy module.
    from .configuration_blenderbot_small import (
        BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotSmallConfig,
        BlenderbotSmallOnnxConfig,
    )
    from .tokenization_blenderbot_small import BlenderbotSmallTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot_small import (
            BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotSmallForCausalLM,
            BlenderbotSmallForConditionalGeneration,
            BlenderbotSmallModel,
            BlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot_small import (
            TFBlenderbotSmallForConditionalGeneration,
            TFBlenderbotSmallModel,
            TFBlenderbotSmallPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot_small import (
            FlaxBlenderbotSmallForConditionalGeneration,
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    # One line of whitespace-separated integers.
    input_list = [int(x) for x in input().split()]
    # The sorter above is (obfuscated to) `_lowerCamelCase`; the original called
    # `odd_even_sort`, which is not defined in this module, and then printed a
    # `sorted_list` name that was likewise never bound — both raised NameError.
    sorted_list = _lowerCamelCase(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
| 1
|
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
# Regexes used by the snake/camel-case filename helpers below.
# NOTE(review): each pattern is assigned to the throwaway name `A`, while the
# functions below reference `_uppercase_uppercase_re`, `_lowercase_uppercase_re`,
# `_single_underscore_re`, `_multiple_underscores_re` and `_split_re` — none of
# which are defined in view. Looks like a broken rename; confirm upstream.
A : Dict = re.compile(R"([A-Z]+)([A-Z][a-z])")
A : Union[str, Any] = re.compile(R"([a-z\d])([A-Z])")
A : Any = re.compile(R"(?<!_)_(?!_)")
A : List[Any] = re.compile(R"(_{2,})")
A : Union[str, Any] = R"^\w+(\.\w+)*$"
A : Union[str, Any] = R"<>:/\|?*"
def _lowerCamelCase ( _UpperCamelCase ):
    """Convert a CamelCase name to snake_case (apparent intent).

    NOTE(review): both `sub` results are bound to the same throwaway local and
    the second `sub` is applied to the original argument (discarding the first
    substitution); the final line returns `name.lower()` but `name` is never
    bound in this scope, and the `_*_re` globals are not defined in view.
    TODO: confirm against the upstream `camelcase_to_snakecase`.
    """
    __lowerCAmelCase = _uppercase_uppercase_re.sub(R"\1_\2" , _UpperCamelCase )
    __lowerCAmelCase = _lowercase_uppercase_re.sub(R"\1_\2" , _UpperCamelCase )
    return name.lower()
def _lowerCamelCase ( _UpperCamelCase ):
    """Convert a snake_case name to CamelCase (apparent intent).

    NOTE(review): the `split` result is bound to a throwaway local, the
    comprehension iterates `name` (never bound here), and the `_*_re` globals
    are not defined in view — TODO confirm against upstream
    `snakecase_to_camelcase`.
    """
    __lowerCAmelCase = _single_underscore_re.split(_UpperCamelCase )
    __lowerCAmelCase = [_multiple_underscores_re.split(_UpperCamelCase ) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(_UpperCamelCase ) if n != "" )
def _lowerCamelCase ( _UpperCamelCase ):
    """Return the snake_case filename prefix for a dataset name, rejecting paths.

    NOTE(review): the comparison and error message use `name`, which is never
    bound in this scope, and the call target `camelcase_to_snakecase` is not
    defined in view (the helper above was renamed `_lowerCamelCase`) — TODO
    confirm against upstream `filename_prefix_for_name`.
    """
    if os.path.basename(_UpperCamelCase ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(_UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """Build the `<name>-<split>` filename prefix (apparent intent).

    NOTE(review): the two parameters share the name `_UpperCamelCase`, which is
    a SyntaxError in Python; the body also references `name`, `split`,
    `_split_re` and `filename_prefix_for_name`, none of which are defined in
    view. Left byte-identical pending comparison with upstream.
    """
    if os.path.basename(_UpperCamelCase ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , _UpperCamelCase ):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
    return f"{filename_prefix_for_name(_UpperCamelCase )}-{split}"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
    """Return the glob pattern `<dir>/<name>-<split>[.<suffix>]*` (apparent intent).

    NOTE(review): duplicate parameter names make this def a SyntaxError; the
    body references `filetype_suffix`, `prefix` and `filepath`, which are never
    bound here, and calls `filename_prefix_for_split`, not defined in view.
    """
    __lowerCAmelCase = filename_prefix_for_split(_UpperCamelCase , _UpperCamelCase )
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    __lowerCAmelCase = os.path.join(_UpperCamelCase , _UpperCamelCase )
    return f"{filepath}*"
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None ):
    """Return the concrete shard filenames for a dataset split (apparent intent).

    NOTE(review): duplicate parameter names make this def a SyntaxError; the
    body references `shard_lengths`, `prefix`, `num_shards`, `filenames`,
    `filetype_suffix` and `filename`, none of which are bound in this scope,
    and calls `filename_prefix_for_split`, not defined in view.
    """
    __lowerCAmelCase = filename_prefix_for_split(_UpperCamelCase , _UpperCamelCase )
    __lowerCAmelCase = os.path.join(_UpperCamelCase , _UpperCamelCase )
    if shard_lengths:
        __lowerCAmelCase = len(_UpperCamelCase )
        __lowerCAmelCase = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(_UpperCamelCase )]
        if filetype_suffix:
            __lowerCAmelCase = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        __lowerCAmelCase = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
| 57
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Processor bundling a CLIP image processor with an XLM-Roberta tokenizer
    (AltCLIP-style). Restores the ProcessorMixin contract the obfuscation
    destroyed: the original declared duplicate `__a` parameters (a
    SyntaxError), collapsed `attributes`/`image_processor_class`/
    `tokenizer_class` into one name, collapsed `batch_decode`/`decode`/
    `model_input_names` into one `snake_case` name, and dropped the merged
    `pixel_values` on the text+image path.
    """

    # Names ProcessorMixin uses to wire up the sub-processors.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Legacy `feature_extractor` is accepted as a fallback for `image_processor`.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns a BatchEncoding with token fields, `pixel_values`, or both.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # Merge image features into the token encoding (the original bound
            # this to a throwaway local, losing the pixel values).
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forward to the tokenizer; see PreTrainedTokenizer.batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forward to the tokenizer; see PreTrainedTokenizer.decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of both sub-processors' input names, order-preserving, deduped.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57
| 1
|
"""simple docstring"""
from manim import *
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Manim scene animating a sharded checkpoint being loaded into a model
    across CPU/GPU memory blocks (apparent intent).

    NOTE(review): every intermediate is bound to the one throwaway local
    `__lowerCAmelCase`, and the body then references `__a`, `mem`, `cpu`,
    `gpu`, `model`, `cpu_left_col_base`, `cpu_right_col_base`, `cpu_targs`,
    `fill`, `key`, `key_text`, `checkpoint`, `blue_text`, `step_a`, `target`,
    `first_animations`, `second_animations` — none of which are ever bound in
    this scope — so the scene cannot render as written. Code left
    byte-identical pending comparison with the original animation source.
    """

    def snake_case ( self ):
        __lowerCAmelCase = Rectangle(height=0.5 , width=0.5 )
        __lowerCAmelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("CPU" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__a )
        __lowerCAmelCase = [mem.copy() for i in range(4 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("GPU" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        gpu.move_to([-1, -1, 0] )
        self.add(__a )
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("Model" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        model.move_to([3, -1.0, 0] )
        self.add(__a )
        __lowerCAmelCase = []
        for i, rect in enumerate(__a ):
            rect.set_stroke(__a )
            # target = fill.copy().set_fill(YELLOW, opacity=0.7)
            # target.move_to(rect)
            # self.add(target)
            __lowerCAmelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__a )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
            else:
                cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
            self.add(__a )
            cpu_targs.append(__a )
        __lowerCAmelCase = [mem.copy() for i in range(6 )]
        __lowerCAmelCase = VGroup(*__a ).arrange(__a , buff=0 )
        __lowerCAmelCase = Text("Loaded Checkpoint" , font_size=24 )
        __lowerCAmelCase = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
        checkpoint.move_to([3, 0.5, 0] )
        __lowerCAmelCase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __lowerCAmelCase = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(__a , __a )
        __lowerCAmelCase = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
        blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        __lowerCAmelCase = MarkupText(
            f"Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>." , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__a ) , Write(__a ) )
        self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        for i, rect in enumerate(__a ):
            __lowerCAmelCase = fill.copy().set_fill(__a , opacity=0.7 )
            target.move_to(__a )
            first_animations.append(GrowFromCenter(__a , run_time=1 ) )
            __lowerCAmelCase = target.copy()
            cpu_target.generate_target()
            if i < 5:
                cpu_target.target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.target.move_to(cpu_right_col_base[i - 5] )
            second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
        self.play(*__a )
        self.play(*__a )
        self.wait()
| 57
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
    """Empty marker class; exists only to exercise the `@require_onnxruntime`
    decorator (presumably skipping/erroring when onnxruntime is absent —
    confirm against `require_onnxruntime`'s implementation)."""

    pass
| 57
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Import structure consumed by `_LazyModule`: submodule name -> exported names.
# (The original bound this dict — and every backend list — to the throwaway
# name `A`, then passed the undefined name `_import_structure` to _LazyModule,
# so importing the package raised NameError.)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}

# Register backend-specific exports only when that backend is importable.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]

# At type-checking time import the real symbols so static analyzers see them.
if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
    """Min-heap intended to drive Prim's algorithm below (apparent intent).

    NOTE(review): this class cannot work as written — `__init__` binds `[]` to
    a local instead of `self.node_position` (callers use `heap.node_position`),
    `set_position` discards `pos` into a local, every method is named
    `snake_case` so only the last definition survives on the class, and the
    two four-argument methods declare duplicate `__a` parameters (a
    SyntaxError) while their bodies reference `heap`, `positions`, `start`,
    `size`, `smallest_child`, `index`, `val`, `parent`, `temp`, `tempa` —
    never bound in scope. Left byte-identical pending comparison with the
    original Prim's-algorithm heap implementation.
    """

    def __init__( self ):
        __lowerCAmelCase = []

    def snake_case ( self , __a ):
        return self.node_position[vertex]

    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = pos

    def snake_case ( self , __a , __a , __a , __a ):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                __lowerCAmelCase = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    __lowerCAmelCase = 2 * start + 1
                else:
                    __lowerCAmelCase = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                __lowerCAmelCase , __lowerCAmelCase = heap[smallest_child], positions[smallest_child]
                __lowerCAmelCase , __lowerCAmelCase = (
                    heap[start],
                    positions[start],
                )
                __lowerCAmelCase , __lowerCAmelCase = temp, tempa
                __lowerCAmelCase = self.get_position(positions[smallest_child] )
                self.set_position(
                    positions[smallest_child] , self.get_position(positions[start] ) )
                self.set_position(positions[start] , __a )
                self.top_to_bottom(__a , __a , __a , __a )

    def snake_case ( self , __a , __a , __a , __a ):
        __lowerCAmelCase = position[index]
        while index != 0:
            __lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
            if val < heap[parent]:
                __lowerCAmelCase = heap[parent]
                __lowerCAmelCase = position[parent]
                self.set_position(position[parent] , __a )
            else:
                __lowerCAmelCase = val
                __lowerCAmelCase = temp
                self.set_position(__a , __a )
                break
            __lowerCAmelCase = parent
        else:
            __lowerCAmelCase = val
            __lowerCAmelCase = temp
            self.set_position(__a , 0 )

    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = len(__a ) // 2 - 1
        for i in range(__a , -1 , -1 ):
            self.top_to_bottom(__a , __a , len(__a ) , __a )

    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = positions[0]
        __lowerCAmelCase = sys.maxsize
        self.top_to_bottom(__a , 0 , len(__a ) , __a )
        return temp
def _lowerCamelCase ( _UpperCamelCase ):
    """Prim's minimum-spanning-tree algorithm over an adjacency list
    (apparent intent): returns the list of tree edges.

    NOTE(review): nearly every statement binds to the single throwaway local
    `__lowerCAmelCase`, while the loops reference `Heap` (the class above is
    `_UpperCamelCase`), `heap`, `distance_tv`, `positions`, `visited`,
    `nbr_tv`, `tree_edges`, `adjacency_list`, `vertex` — never bound in this
    scope — so the function cannot run as written. Left byte-identical.
    """
    __lowerCAmelCase = Heap()
    __lowerCAmelCase = [0] * len(_UpperCamelCase )
    __lowerCAmelCase = [-1] * len(_UpperCamelCase )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    __lowerCAmelCase = []  # Heap of Distance of vertices from their neighboring vertex
    __lowerCAmelCase = []
    for vertex in range(len(_UpperCamelCase ) ):
        distance_tv.append(sys.maxsize )
        positions.append(_UpperCamelCase )
        heap.node_position.append(_UpperCamelCase )
    __lowerCAmelCase = []
    __lowerCAmelCase = 1
    __lowerCAmelCase = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        __lowerCAmelCase = 0
        __lowerCAmelCase = distance
    heap.heapify(_UpperCamelCase , _UpperCamelCase )
    for _ in range(1 , len(_UpperCamelCase ) ):
        __lowerCAmelCase = heap.delete_minimum(_UpperCamelCase , _UpperCamelCase )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            __lowerCAmelCase = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(_UpperCamelCase )]
                ):
                    __lowerCAmelCase = distance
                    heap.bottom_to_top(
                        _UpperCamelCase , heap.get_position(_UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase )
                    __lowerCAmelCase = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # NOTE(review): reads the edge list interactively, then calls
    # `prisms_algorithm`, which is not defined in view (the function above is
    # `_lowerCamelCase`); `edges_number`, `adjacency_list` and `edge` are
    # likewise never bound — all inputs were assigned to the throwaway `A`.
    A : Optional[Any] = int(input("Enter number of edges: ").strip())
    A : Dict = defaultdict(list)
    for _ in range(edges_number):
        A : str = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
| 1
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Helper that exercises a transformers config class: common properties,
    JSON round-trips, save/load round-trips, label counts, and keyword-arg
    init (apparent intent, cf. transformers' ConfigTester).

    NOTE(review): `__init__` declares duplicate `__a` parameters (a
    SyntaxError) and binds all of them to one throwaway local instead of
    attributes like `self.parent`/`self.config_class`; every check method is
    named `snake_case` so only the last definition survives on the class; the
    bodies reference `parent`, `config_class`, `common_properties`, `config`,
    `obj`, `tmpdirname`, `config_first`, `config_second`, `subfolder`,
    `config_common_kwargs`-derived locals, etc., none of which are bound.
    Left byte-identical pending comparison with the upstream tester.
    """

    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties

    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )

    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )

    def snake_case ( self ):
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )

    def snake_case ( self ):
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )

    def snake_case ( self ):
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )

    def snake_case ( self ):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
# Key-prefix rewrites applied when converting original VisualBERT checkpoints
# to the transformers layout, and the whitelist of convertible checkpoints.
# NOTE(review): all three module constants are bound to the throwaway name
# `A`, while the functions below reference `rename_keys_prefix` and
# `ACCEPTABLE_CHECKPOINTS` — neither defined in view; looks like a broken
# rename.
A : Tuple = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
A : Optional[Any] = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location="cpu" )
return sd
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=rename_keys_prefix ):
    """Rewrite checkpoint keys into the transformers VisualBERT layout,
    skipping detector weights (apparent intent).

    NOTE(review): the default argument `rename_keys_prefix` is not defined in
    view (the constant above was renamed `A`), the loop binds the renamed key
    and the value to the same throwaway local instead of populating a result
    dict, and the function returns `new_d`, which is never bound — so this
    cannot run as written. Left byte-identical.
    """
    __lowerCAmelCase = OrderedDict()
    __lowerCAmelCase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        __lowerCAmelCase = key
        for name_pair in rename_keys_prefix:
            __lowerCAmelCase = new_key.replace(name_pair[0] , name_pair[1] )
        __lowerCAmelCase = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            __lowerCAmelCase = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """Convert an original VisualBERT checkpoint into a transformers model
    directory (apparent intent).

    NOTE(review): the two parameters share one name (a SyntaxError in Python),
    and every intermediate — model type, config dict, config, state dict,
    model — is bound to the same throwaway local `__lowerCAmelCase`, so the
    later references to `checkpoint_path`, `model_type`, `config`,
    `state_dict`, `model` are undefined and `VisualBertConfig(**_UpperCamelCase)`
    splats a path string. `ACCEPTABLE_CHECKPOINTS`, `load_state_dict` and
    `get_new_dict` are also not defined in view. Left byte-identical.
    """
    assert (
        checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."
    # Get Config
    if "pre" in checkpoint_path:
        __lowerCAmelCase = "pretraining"
        if "vcr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`." )
    else:
        if "vcr" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 512}
            __lowerCAmelCase = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048}
            __lowerCAmelCase = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            __lowerCAmelCase = {"visual_embedding_dim": 2048, "num_labels": 3129}
            __lowerCAmelCase = "vqa"
        elif "nlvr" in checkpoint_path:
            __lowerCAmelCase = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            __lowerCAmelCase = "nlvr"
    __lowerCAmelCase = VisualBertConfig(**_UpperCamelCase )
    # Load State Dict
    __lowerCAmelCase = load_state_dict(_UpperCamelCase )
    __lowerCAmelCase = get_new_dict(_UpperCamelCase , _UpperCamelCase )
    if model_type == "pretraining":
        __lowerCAmelCase = VisualBertForPreTraining(_UpperCamelCase )
    elif model_type == "vqa":
        __lowerCAmelCase = VisualBertForQuestionAnswering(_UpperCamelCase )
    elif model_type == "nlvr":
        __lowerCAmelCase = VisualBertForVisualReasoning(_UpperCamelCase )
    elif model_type == "multichoice":
        __lowerCAmelCase = VisualBertForMultipleChoice(_UpperCamelCase )
    model.load_state_dict(_UpperCamelCase )
    # Save Checkpoints
    Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
    model.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: <orig_checkpoint_path> <pytorch_dump_folder_path>.
    # NOTE(review): the parser and parsed args are bound to the throwaway `A`,
    # so `parser`/`args` are undefined below, and `convert_visual_bert_checkpoint`
    # is not defined in view (the converter above is `_lowerCamelCase`).
    A : Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    A : Optional[int] = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 1
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )
    args = parser.parse_args()

    # argparse derives the attribute `args.txt2img_unclip` from the long
    # option; the original read the nonexistent `args.txtaimg_unclip`
    # (AttributeError) and saved via an unbound name `imgaimg`.
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Re-assemble the text-to-image components into an image-variation pipeline.
    imgaimg = UnCLIPImageVariationPipeline(
        decoder=txtaimg.decoder,
        text_encoder=txtaimg.text_encoder,
        tokenizer=txtaimg.tokenizer,
        text_proj=txtaimg.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txtaimg.super_res_first,
        super_res_last=txtaimg.super_res_last,
        decoder_scheduler=txtaimg.decoder_scheduler,
        super_res_scheduler=txtaimg.super_res_scheduler,
    )
    imgaimg.save_pretrained(args.dump_path)
| 57
|
"""simple docstring"""
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Queue-overflow exception type (apparent intent).

    NOTE(review): the queues below raise `OverflowError`/`OverFlowError`,
    neither of which resolves to this obfuscated class name — confirm the
    intended name against the original source."""

    pass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Queue-underflow exception type (apparent intent).

    NOTE(review): the queues below raise `UnderFlowError`, which does not
    resolve to this obfuscated class name — confirm against the original."""

    pass
class _UpperCamelCase :
    """Fixed-priority queue with three levels (0 highest .. 2 lowest), each
    capped at 100 items (apparent intent).

    NOTE(review): `__init__` binds the three queues to a local instead of
    `self.queues`; enqueue and dequeue are both named `snake_case`, so only
    dequeue survives on the class; and `UnderFlowError` is not defined in view
    (the exception classes above are both `_UpperCamelCase`). Left
    byte-identical.
    """

    def __init__( self ):
        __lowerCAmelCase = [
            [],
            [],
            [],
        ]

    def snake_case ( self , __a , __a ):
        try:
            if len(self.queues[priority] ) >= 1_00:
                raise OverflowError("Maximum queue size is 100" )
            self.queues[priority].append(__a )
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2" )

    def snake_case ( self ):
        # FIFO within a level; highest-priority non-empty level first.
        for queue in self.queues:
            if queue:
                return queue.pop(0 )
        raise UnderFlowError("All queues are empty" )

    def __str__( self ):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class _UpperCamelCase :
    """Priority queue where the smallest element has the highest priority,
    capped at 100 items (apparent intent).

    NOTE(review): `__init__` binds `[]` to a local instead of `self.queue`;
    enqueue and dequeue share the name `snake_case` (only dequeue survives);
    `OverFlowError`/`UnderFlowError` are not defined in view. Left
    byte-identical.
    """

    def __init__( self ):
        __lowerCAmelCase = []

    def snake_case ( self , __a ):
        if len(self.queue ) == 1_00:
            raise OverFlowError("Maximum queue size is 100" )
        self.queue.append(__a )

    def snake_case ( self ):
        if not self.queue:
            raise UnderFlowError("The queue is empty" )
        else:
            # O(n) removal of the minimum element.
            __lowerCAmelCase = min(self.queue )
            self.queue.remove(__a )
            return data

    def __str__( self ):
        return str(self.queue )
def _lowerCamelCase ( ):
    """Demo driver for the fixed-priority queue (apparent intent).

    NOTE(review): the queue is bound to a throwaway local while the calls use
    `fpq` (never bound); `FixedPriorityQueue` is not defined in view (the
    class above is `_UpperCamelCase`), and its enqueue method was shadowed by
    the dequeue rename — this demo cannot run as written. Left byte-identical.
    """
    __lowerCAmelCase = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 100 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 128 )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def _lowerCamelCase ( ):
    """Demo driver for the element-priority queue (apparent intent).

    NOTE(review): the queue is bound to a throwaway local while the calls use
    `epq` (never bound); `ElementPriorityQueue` is not defined in view, and
    its enqueue method was shadowed by the dequeue rename — this demo cannot
    run as written. Left byte-identical.
    """
    __lowerCAmelCase = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): neither name is defined in view — both demo functions
    # above were obfuscated to `_lowerCamelCase`.
    fixed_priority_queue()
    element_priority_queue()
| 57
| 1
|
"""simple docstring"""
import numpy as np
import datasets
# Docstrings fed to `add_start_docstrings` below.
# NOTE(review): the three constants are bound to the throwaway name `A`, while
# the decorator references `_DESCRIPTION`, `_CITATION` and
# `_KWARGS_DESCRIPTION` — none defined in view.
A : List[Any] = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
A : Dict = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
A : Optional[int] = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
    """datasets.Metric computing the Mahalanobis distance of points in X
    against a reference distribution (apparent intent).

    NOTE(review): the compute method declares duplicate `__a` parameters (a
    SyntaxError) and the body references `X` and `reference_distribution`,
    which are never bound (both inputs were assigned to the same throwaway
    local). Left byte-identical pending comparison with the upstream metric.
    """

    def snake_case ( self ):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ),
                } ) , )

    def snake_case ( self , __a , __a ):
        # convert to numpy arrays
        __lowerCAmelCase = np.array(__a )
        __lowerCAmelCase = np.array(__a )
        # Assert that arrays are 2D
        if len(X.shape ) != 2:
            raise ValueError("Expected `X` to be a 2D vector" )
        if len(reference_distribution.shape ) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector" )
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" )
        # Get mahalanobis distance for each prediction
        __lowerCAmelCase = X - np.mean(__a )
        __lowerCAmelCase = np.cov(reference_distribution.T )
        try:
            __lowerCAmelCase = np.linalg.inv(__a )
        except np.linalg.LinAlgError:
            # Fall back to the pseudo-inverse for singular covariance matrices.
            __lowerCAmelCase = np.linalg.pinv(__a )
        __lowerCAmelCase = np.dot(__a , __a )
        __lowerCAmelCase = np.dot(__a , X_minus_mu.T ).diagonal()
        return {"mahalanobis": mahal_dist}
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__lowerCAmelCase = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
    # Run any doctest examples embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)

# Maps submodule name -> public names; consumed lazily by _LazyModule below.
# (The obfuscated original never defined `_import_structure` and clobbered a
# single name `A` repeatedly, so the optional registrations were lost.)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

# Optional components are only registered when their backend is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only; never executed at runtime.
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
"""simple docstring"""
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 1
while len(_UpperCamelCase ) < 1e6:
constant.append(str(_UpperCamelCase ) )
i += 1
__lowerCAmelCase = "".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
    # The solver above is bound as `_lowerCamelCase`; the previous call to an
    # undefined `solution` raised NameError.
    print(_lowerCamelCase())
| 57
| 1
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger; the conversion function below reports progress through it.
# (The obfuscated original bound this to `A`, leaving `logger` undefined.)
logger = logging.get_logger(__name__)
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = original_name.split("." )[0]
__lowerCAmelCase = key.split("." )
__lowerCAmelCase = int(key_list[key_list.index(_UpperCamelCase ) - 2] )
__lowerCAmelCase = int(key_list[key_list.index(_UpperCamelCase ) - 1] )
__lowerCAmelCase = orig_block_num - offset
__lowerCAmelCase = key.replace(f"{orig_block_num}.{layer_num}.{original_name}" , f"block.{new_block_num}.{layer_num}.{new_name}" )
return key
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = OrderedDict()
__lowerCAmelCase , __lowerCAmelCase = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
__lowerCAmelCase = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
__lowerCAmelCase = key[: key.find("proj" )]
__lowerCAmelCase = key.replace(_UpperCamelCase , f"patch_embeddings.{total_embed_found}." )
__lowerCAmelCase = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
__lowerCAmelCase = "poolformer.encoder." + key
if "mlp.fc1" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "norm1" , "before_norm" )
if "norm2" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "norm2" , "after_norm" )
if "layer_scale_1" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
__lowerCAmelCase = replace_key_with_offset(_UpperCamelCase , _UpperCamelCase , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
__lowerCAmelCase = key.replace("head" , "classifier" )
__lowerCAmelCase = value
return new_state_dict
def _lowerCamelCase ( ):
    """Download the standard COCO sanity-check image (two cats on a couch).

    Fixes the obfuscated original, where ``stream=_UpperCamelCase`` referenced
    an undefined name (originally ``stream=True``).
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    response = requests.get(url, stream=True)
    return Image.open(response.raw)
@torch.no_grad()
def _lowerCamelCase ( model_name , checkpoint_path , pytorch_dump_folder_path ):
    """Convert an original PoolFormer checkpoint to the HuggingFace format.

    Args:
        model_name: e.g. "poolformer_s12"; the last three characters select the size.
        checkpoint_path: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for model + image processor.

    Raises:
        ValueError: for an unsupported size suffix.

    Fixes the obfuscated original, whose three duplicated parameter names were
    a SyntaxError and whose config/local bindings had all been erased.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.30_45, -0.67_58, -0.48_69])
    elif size == "s24":
        expected_slice = torch.tensor([0.44_02, -0.13_74, -0.80_45])
    elif size == "s36":
        expected_slice = torch.tensor([-0.60_80, -0.51_33, -0.58_98])
    elif size == "m36":
        expected_slice = torch.tensor([0.39_52, 0.22_63, -1.26_68])
    elif size == "m48":
        expected_slice = torch.tensor([0.11_67, -0.06_56, -0.34_23])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): `convert_poolformer_checkpoint` must exist at module scope;
    # the converter above lost its original name during obfuscation — confirm.
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 57
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures for the tests below: the small Lena test image and its
# grayscale version. (The obfuscated original bound both to the same name `A`,
# so neither was usable; the tests refer to them as `img` / `gray` upstream.)
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
    """convert_to_negative smoke test: the result must contain a truthy pixel."""
    # NOTE(review): `_UpperCamelCase` and `negative_img` are undefined —
    # obfuscation erased the module-level `img` and the local binding; restore first.
    __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()
def _lowerCamelCase ( ):
    """change_contrast smoke test, checked via the repr of the returned PIL image."""
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        # NOTE(review): `_UpperCamelCase` is undefined; presumably it was the
        # local `img` opened above — confirm before running.
        assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
    """gen_gaussian_kernel(9, sigma=1.4) must return an all-truthy kernel."""
    __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    # NOTE(review): `resp` was the lost binding of the kernel above — restore.
    assert resp.all()
def _lowerCamelCase ( ):
    """canny edge detector smoke test on the grayscale Lena image."""
    # NOTE(review): `canny_img`, `canny_array` and `_UpperCamelCase` bindings
    # were erased by obfuscation; restore before running.
    __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __lowerCAmelCase = canny.canny(_UpperCamelCase )
    # assert canny array for at least one True
    assert canny_array.any()
def _lowerCamelCase ( ):
    """gaussian_filter smoke test (kernel 5, sigma 0.9)."""
    # NOTE(review): `_UpperCamelCase` is undefined — presumably the module-level
    # grayscale image (`gray`); confirm.
    assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
    """img_convolve with a Laplacian-like kernel must produce a truthy result."""
    __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    # NOTE(review): erased bindings — originally something like
    # `res = conv.img_convolve(gray, laplace).astype(uint8)`; confirm and restore.
    __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
    assert res.any()
def _lowerCamelCase ( ):
    """median_filter smoke test (kernel size 3)."""
    # NOTE(review): `_UpperCamelCase` is undefined — presumably `gray`; confirm.
    assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
    """sobel_filter must return truthy gradient-magnitude and direction arrays."""
    # NOTE(review): originally `grad, theta = sob.sobel_filter(gray)` — names erased.
    __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
    assert grad.any() and theta.any()
def _lowerCamelCase ( ):
    """make_sepia smoke test with factor 20."""
    # NOTE(review): result was bound to `sepia`, input presumably the color `img`.
    __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
    assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    """Burkes dithering smoke test: processed output must contain truthy pixels."""
    # `_UpperCamelCase` is the image file path (original parameter name lost).
    # NOTE(review): the Burkes instance was bound to `burkes` before obfuscation.
    __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    """NearestNeighbour resize (400x200) smoke test."""
    # NOTE(review): the resizer instance was bound to `nn` before obfuscation.
    __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def _lowerCamelCase ( ):
    """Local binary pattern: check get_neighbors_pixel and the full LBP image."""
    # NOTE(review): obfuscation erased the locals (`file_path`, `image`,
    # `x_coordinate`, `y_coordinate`, `pixel_value`, `neighbors_pixels`,
    # `lbp_image`); restore before running.
    __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    __lowerCAmelCase = imread(_UpperCamelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = image[x_coordinate][y_coordinate]
    __lowerCAmelCase = lbp.get_neighbors_pixel(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert lbp_image.any()
| 57
| 1
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# Fixtures shared by the download-manager tests: the mocked remote file, its
# expected content, and the expected cache filename hash. (The obfuscated
# original bound all three to `A`, leaving URL/CONTENT/HASH undefined where
# the tests read them.)
URL = "http://www.mocksite.com/file1.txt"
CONTENT = "\"text\": [\"foo\", \"foo\"]"
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class _UpperCamelCase :
    """Minimal stand-in for a `requests` HTTP response object.

    NOTE(review): obfuscation collapsed the three class attributes (presumably
    status_code=200, headers, cookies) into one repeated name, so only the last
    assignment survives as a usable attribute — restore before relying on this.
    """

    __UpperCAmelCase : Any =2_0_0
    __UpperCAmelCase : Optional[int] ={"""Content-Length""": """100"""}
    __UpperCAmelCase : Optional[int] ={}

    def snake_case ( self , **__a ):
        # Mimics Response.iter_content. NOTE(review): `bytes(__a, "utf-8")` is
        # applied to the kwargs dict and will raise; presumably this originally
        # encoded the module-level CONTENT string — confirm against upstream.
        return [bytes(__a , "utf-8" )]
def _lowerCamelCase ( *args , **kwargs ):
    """Stand-in for ``requests``' request function: always returns a canned MockResponse.

    Fixes the obfuscated original, whose ``*args``/``**kwargs`` shared one
    duplicated name (a SyntaxError).
    """
    return MockResponse()
@pytest.mark.parametrize("urls_type" , [str, list, dict] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Download via DownloadManager for str/list/dict URL inputs and verify the
    cached path, its content, and the sidecar metadata JSON.

    NOTE(review): obfuscation collapsed the three parameters (presumably
    monkeypatch, tmp_path, urls_type) into one duplicated name — a SyntaxError —
    and erased most local bindings; restore from the upstream test before use.
    """
    import requests

    monkeypatch.setattr(_UpperCamelCase , "request" , _UpperCamelCase )
    __lowerCAmelCase = URL
    if issubclass(_UpperCamelCase , _UpperCamelCase ):
        __lowerCAmelCase = url
    elif issubclass(_UpperCamelCase , _UpperCamelCase ):
        __lowerCAmelCase = [url]
    elif issubclass(_UpperCamelCase , _UpperCamelCase ):
        __lowerCAmelCase = {"train": url}
    __lowerCAmelCase = "dummy"
    __lowerCAmelCase = "downloads"
    __lowerCAmelCase = tmp_path
    __lowerCAmelCase = DownloadConfig(
        cache_dir=os.path.join(_UpperCamelCase , _UpperCamelCase ) , use_etag=_UpperCamelCase , )
    __lowerCAmelCase = DownloadManager(dataset_name=_UpperCamelCase , download_config=_UpperCamelCase )
    __lowerCAmelCase = dl_manager.download(_UpperCamelCase )
    __lowerCAmelCase = urls
    # Normalize the single-path / dict cases to parallel lists so one loop
    # can verify every (downloaded_path, input_url) pair.
    for downloaded_paths in [downloaded_paths]:
        if isinstance(_UpperCamelCase , _UpperCamelCase ):
            __lowerCAmelCase = [downloaded_paths]
            __lowerCAmelCase = [urls]
        elif isinstance(_UpperCamelCase , _UpperCamelCase ):
            assert "train" in downloaded_paths.keys()
            __lowerCAmelCase = downloaded_paths.values()
            __lowerCAmelCase = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(_UpperCamelCase , _UpperCamelCase ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            __lowerCAmelCase = Path(_UpperCamelCase )
            __lowerCAmelCase = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            __lowerCAmelCase = downloaded_path.read_text()
            assert content == CONTENT
            __lowerCAmelCase = downloaded_path.with_suffix(".json" )
            assert metadata_downloaded_path.exists()
            __lowerCAmelCase = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" , [str, list, dict] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    """Extract via DownloadManager for str/list/dict path inputs and verify the
    extracted location and its content against the plain text file.

    NOTE(review): same obfuscation damage as the download test above
    (duplicated parameter names, erased locals); restore from upstream first.
    """
    __lowerCAmelCase = str(_UpperCamelCase )
    if issubclass(_UpperCamelCase , _UpperCamelCase ):
        __lowerCAmelCase = filename
    elif issubclass(_UpperCamelCase , _UpperCamelCase ):
        __lowerCAmelCase = [filename]
    elif issubclass(_UpperCamelCase , _UpperCamelCase ):
        __lowerCAmelCase = {"train": filename}
    __lowerCAmelCase = "dummy"
    __lowerCAmelCase = xz_file.parent
    __lowerCAmelCase = "extracted"
    __lowerCAmelCase = DownloadConfig(
        cache_dir=_UpperCamelCase , use_etag=_UpperCamelCase , )
    __lowerCAmelCase = DownloadManager(dataset_name=_UpperCamelCase , download_config=_UpperCamelCase )
    __lowerCAmelCase = dl_manager.extract(_UpperCamelCase )
    __lowerCAmelCase = paths
    # Normalize single-path / dict cases to parallel lists, as in the download test.
    for extracted_paths in [extracted_paths]:
        if isinstance(_UpperCamelCase , _UpperCamelCase ):
            __lowerCAmelCase = [extracted_paths]
            __lowerCAmelCase = [paths]
        elif isinstance(_UpperCamelCase , _UpperCamelCase ):
            assert "train" in extracted_paths.keys()
            __lowerCAmelCase = extracted_paths.values()
            __lowerCAmelCase = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(_UpperCamelCase , _UpperCamelCase ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            __lowerCAmelCase = Path(_UpperCamelCase )
            __lowerCAmelCase = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(_UpperCamelCase , etag=_UpperCamelCase )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            __lowerCAmelCase = extracted_path.read_text()
            __lowerCAmelCase = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
assert path.endswith(".jsonl" )
for num_items, line in enumerate(_UpperCamelCase , start=1 ):
__lowerCAmelCase = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" , ["tar_jsonl_path", "zip_jsonl_path"] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """iter_archive over a tar/zip of jsonl files must yield exactly two members.

    NOTE(review): the two parameters (presumably request, archive_jsonl) share
    one obfuscated name — a SyntaxError — and `dl_manager` was erased; restore.
    """
    __lowerCAmelCase = request.getfixturevalue(_UpperCamelCase )
    __lowerCAmelCase = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_UpperCamelCase ) , start=1 ):
        _test_jsonl(_UpperCamelCase , _UpperCamelCase )
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" , ["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
    """iter_archive over a nested archive: one inner tar holding two jsonl files.

    NOTE(review): duplicated parameter names (SyntaxError) and erased locals,
    as in the test above; restore from upstream before running.
    """
    __lowerCAmelCase = request.getfixturevalue(_UpperCamelCase )
    __lowerCAmelCase = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_UpperCamelCase ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_UpperCamelCase ) , start=1 ):
            _test_jsonl(_UpperCamelCase , _UpperCamelCase )
    assert num_tar == 1
    assert num_jsonl == 2
def _lowerCamelCase ( _UpperCamelCase ):
    """iter_files must yield test.txt first, then train.txt (two files total).

    NOTE(review): `dl_manager` was erased by obfuscation, and the basename
    check should presumably apply to `file`, not the parameter — confirm.
    """
    __lowerCAmelCase = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(_UpperCamelCase ) , start=1 ):
        assert os.path.basename(_UpperCamelCase ) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps submodule name -> public names; consumed lazily by _LazyModule below.
# (The obfuscated original never defined `_import_structure` and bound the
# modeling list to a throwaway name, so it was never registered.)
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

# Modeling classes are only available when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    # Eager imports for type checkers only; never executed at runtime.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# UNet configuration used by the tiny "test" checkpoints (32x32 samples).
# (The obfuscated original bound all six dicts below to the same name `A`,
# while the __main__ block reads them by the names restored here.)
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet configuration for class-conditional ImageNet-64 checkpoints.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet configuration for unconditional LSUN-256 (bedroom / cat) checkpoints.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler configurations: consistency-distillation (cd) and the two
# consistency-training (ct) variants.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
'''simple docstring'''
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.0.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.0.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.2.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.in_layers.2.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.0.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.0.bias"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.3.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
__lowerCAmelCase = checkpoint[f"{old_prefix}.skip_connection.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
__lowerCAmelCase = checkpoint[f"{old_prefix}.norm.weight"]
__lowerCAmelCase = checkpoint[f"{old_prefix}.norm.bias"]
__lowerCAmelCase = weight_q.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_q.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = weight_k.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_k.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = weight_v.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = bias_v.squeeze(-1 ).squeeze(-1 )
__lowerCAmelCase = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
__lowerCAmelCase = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _lowerCamelCase ( checkpoint_path , unet_config ):
    """Translate an original consistency-model UNet checkpoint into diffusers' key layout.

    Args:
        checkpoint_path: path to the original ``.pt`` state dict.
        unet_config: dict describing the target UNet (block types, channels, ...).

    Returns:
        dict: a new state dict whose keys follow the diffusers naming scheme.

    Fixes the obfuscated original, whose duplicated parameter names were a
    SyntaxError and whose ``new_checkpoint[...]`` bindings had all been erased.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    # Time-embedding MLP.
    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # A channel change means the first resnet of the block carries a skip conv.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1
        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]
    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            # Up-block resnets always concatenate the skip input, hence has_skip=True.
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()

    # NOTE(review): `strabool` and `con_pt_to_diffuser` must exist at module
    # scope; the helpers above lost their original names during obfuscation.
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 57
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
# Module-level logger for this file.
A : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Seq2seq-specific training arguments layered on top of `TrainingArguments`:
    label smoothing, sortish sampling, generation-based eval metrics, Adafactor,
    dropout overrides copied into `model.config`, and the LR-scheduler choice.

    NOTE(review): every field below is bound to the same name
    (`__UpperCAmelCase`), so only the last declaration survives on the
    dataclass, and the `bool` fields default to the base-class alias
    `lowerCAmelCase__` — the field names/defaults look scrambled; confirm
    against the upstream seq2seq training-arguments file.
    """
    __UpperCAmelCase : Optional[float] =field(
        default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to SortishSamler or not."""} )
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """whether to use adafactor"""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(default=lowerCAmelCase__ ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[str] =field(
        default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
| 1
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase(_UpperCamelCase):
    """Return the equivalent resistance of resistors connected in parallel.

    Args:
        _UpperCamelCase: iterable of resistor values in ohms; each must be > 0.

    Returns:
        float: ``1 / sum(1/r for r in resistors)``.

    Raises:
        ValueError: if any resistor is zero or negative (message names the index).
        ZeroDivisionError: if the iterable is empty (unchanged from original).
    """
    resistors = _UpperCamelCase
    first_sum = 0.00
    # enumerate() replaces the hand-maintained index counter.
    for index, resistor in enumerate(resistors):
        if resistor <= 0:
            # Raise the descriptive message; the original built the message but
            # then raised ValueError on the input argument instead.
            raise ValueError(f"Resistor at index {index} has a negative or zero value!")
        first_sum += 1 / float(resistor)
    return 1 / first_sum
def _lowerCamelCase(_UpperCamelCase):
    """Return the equivalent resistance of resistors connected in series.

    Args:
        _UpperCamelCase: iterable of resistor values in ohms; each must be >= 0.

    Returns:
        float: the sum of all resistor values.

    Raises:
        ValueError: if any resistor is negative (message names the index).
    """
    resistors = _UpperCamelCase
    sum_r = 0.00
    for index, resistor in enumerate(resistors):
        # Original order preserved: the value is accumulated before validation.
        sum_r += resistor
        if resistor < 0:
            # Raise the descriptive message; the original raised on the input
            # argument instead of the message it had just built.
            raise ValueError(f"Resistor at index {index} has a negative value!")
    return sum_r
if __name__ == "__main__":
    # Run any doctest examples in this module when executed as a script.
    import doctest
    doctest.testmod()
| 57
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
# Root folder whose example scripts pin a minimum library version.
PATH_TO_EXAMPLES = "examples/"
# For each location kind: (regex locating the version string, replacement
# template with the literal "VERSION" placeholder).
REPLACE_PATTERNS = {
    "examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
    "init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
    "setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
    "doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
# Files holding the canonical version, keyed by pattern kind.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def _lowerCamelCase(fname, version, pattern):
    """Rewrite the version string inside `fname` using the regex registered
    under `pattern` in REPLACE_PATTERNS.

    Fixes two scrambling defects: the module constants were all bound to a
    throwaway name `A` (while this code reads REPLACE_PATTERNS/REPLACE_FILES),
    and the three parameters shared one name, which is a SyntaxError.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def _lowerCamelCase(_UpperCamelCase):
    """Update the pinned minimum version in every `.py` example script under
    PATH_TO_EXAMPLES.

    Fixes a scrambling defect: the original walked the *version string* and
    passed the same value for the folder/file/version slots; the walk target
    is the examples directory and only `version` comes from the caller.
    """
    version = _UpperCamelCase
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples
        # from the walk (in-place edit of `directories` prunes the traversal).
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def _lowerCamelCase(version, patch=False):
    """Write `version` everywhere it is hard-coded (init/setup files), and to
    the example scripts as well unless this is a patch release.

    Fixes a scrambling defect: the two parameters shared one name (a
    SyntaxError) and the inner call passed the same value for all three
    argument slots. The second parameter is named `patch` because the caller
    below invokes it as `patch=...`.
    """
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def _lowerCamelCase():
    """Point the model-list links in the main README at the stable docs
    (drop the `/main` path segment).

    Fixes a scrambling defect: the prompts and line buffer were assigned to
    `__lowerCAmelCase` while the code read `_start_prompt`/`_end_prompt`/
    `lines`, and the README path was an undefined name; README_FILE is the
    module constant it reads.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def _lowerCamelCase():
    """Return the current package version, parsed from the main __init__.py.

    Fixes a scrambling defect: the original read the file into one name but
    ran the regex search over a different, undefined name; the search must run
    on the content just read.
    """
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def _lowerCamelCase(patch=False):
    """Interactive pre-release step: compute the next version (minor bump, or
    micro bump for a patch), confirm with the user, and write it everywhere.

    Fixes a scrambling defect: locals were assigned to `__lowerCAmelCase` but
    read back as `default_version`/`version`, and the parameter is named
    `patch` because the `__main__` block calls `pre_release_work(patch=...)`.
    """
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()
def _lowerCamelCase():
    """Interactive post-release step: move the package to the next
    `<major>.<minor+1>.0.dev0` development version.

    Fixes a scrambling defect: locals were assigned to `__lowerCAmelCase` but
    read back as `current_version`/`dev_version`/`version`.
    """
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
if __name__ == "__main__":
    # CLI entry point: `--post_release` runs the post-release flow; `--patch`
    # marks a patch release (skips example updates and README cleanup).
    # NOTE(review): the parser/args objects are assigned to `A` but read back
    # as `parser`/`args` — these bindings never line up, so this entry point
    # cannot run as written; confirm the intended variable names.
    A : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    A : Dict = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 57
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _UpperCamelCase ( unittest.TestCase ):
    """Helper that builds a tiny random `DistilBertConfig` plus dummy inputs for
    the Flax DistilBERT tests below.

    NOTE(review): the constructor parameters are all named `__a` (not valid
    Python) while the body reads `parent`, `batch_size`, ...; both helper
    methods share the name `snake_case`, so the second shadows the first —
    identifiers look scrambled; the code is preserved byte-for-byte.
    """
    def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=4 , ):
        # Store every hyper-parameter of the tiny test model.
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_attention_mask
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = num_choices
    def snake_case ( self ):
        # Build random token ids (and optionally an attention mask) plus the config.
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCAmelCase = None
        if self.use_attention_mask:
            __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCAmelCase = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__a , )
        return config, input_ids, attention_mask
    def snake_case ( self ):
        # Re-package the prepared config/inputs as the (config, inputs_dict)
        # pair expected by the common Flax test mixin.
        __lowerCAmelCase = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
        __lowerCAmelCase = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
    """Runs the shared Flax model-tester suite over the Flax DistilBERT heads.

    NOTE(review): `FlaxDistilBertForQuestionAnswering` appears twice in the
    class tuple — presumably one entry is redundant; confirm against the
    upstream test file.
    """
    __UpperCAmelCase : Optional[int] =(
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def snake_case ( self ):
        # NOTE(review): `FlaxDistilBertModelTester` has no visible definition
        # under that name (the helper class above is `_UpperCamelCase`) — confirm.
        __lowerCAmelCase = FlaxDistilBertModelTester(self )
    @slow
    def snake_case ( self ):
        # Smoke test: each pretrained model class loads and runs a 1x1 dummy input.
        for model_class_name in self.all_model_classes:
            __lowerCAmelCase = model_class_name.from_pretrained("distilbert-base-uncased" )
            __lowerCAmelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(__a )
@require_flax
class _UpperCamelCase ( unittest.TestCase ):
    """Integration test: compares a slice of the pretrained Flax DistilBERT
    hidden states against hard-coded reference values."""
    @slow
    def snake_case ( self ):
        __lowerCAmelCase = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased" )
        __lowerCAmelCase = np.array([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
        __lowerCAmelCase = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        __lowerCAmelCase = model(__a , attention_mask=__a )[0]
        # Expected hidden-state shape: (batch, seq_len, hidden_dim).
        __lowerCAmelCase = (1, 11, 7_68)
        self.assertEqual(output.shape , __a )
        __lowerCAmelCase = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import machinery: submodules are only imported on first attribute
# access, keeping the top-level package import cheap.  This restores the
# canonical `_import_structure` bookkeeping — the scrambled original bound
# every symbol list to a throwaway name `A`, leaving the `_import_structure`
# consumed at the bottom of the module undefined.
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
# One non-default value for every serializable `PretrainedConfig` kwarg; the
# tests below use this mapping to check that config round-trips preserve each
# field (every value must differ from the class default).
A : int = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 1_2_8,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 5_0,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 1_0,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 1_0,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class _UpperCamelCase ( unittest.TestCase ):
    """Hub round-trip tests: push configs to the Hub (user repo, org repo, and
    dynamic custom-code repo) and verify they load back identically.

    NOTE(review): several distinct test methods are all named `snake_case`, so
    later definitions shadow earlier ones, and locals assigned to
    `__lowerCAmelCase` are read back under other names (`config`,
    `new_config`) — identifiers look scrambled; code preserved byte-for-byte.
    """
    @classmethod
    def snake_case ( cls ):
        # Log in with the shared test token (setUpClass in the upstream file).
        __lowerCAmelCase = TOKEN
        HfFolder.save_token(__a )
    @classmethod
    def snake_case ( cls ):
        # Best-effort cleanup of repos created by the tests (tearDownClass upstream).
        try:
            delete_repo(token=cls._token , repo_id="test-config" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-config-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-config" )
        except HTTPError:
            pass
    def snake_case ( self ):
        # Push to a user repo (directly and via save_pretrained), reload, and
        # compare field by field.
        __lowerCAmelCase = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("test-config" , use_auth_token=self._token )
        __lowerCAmelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__a , getattr(__a , __a ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="test-config" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(__a , repo_id="test-config" , push_to_hub=__a , use_auth_token=self._token )
        __lowerCAmelCase = BertConfig.from_pretrained(f"{USER}/test-config" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__a , getattr(__a , __a ) )
    def snake_case ( self ):
        # Same round-trip against an organization repo.
        __lowerCAmelCase = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        config.push_to_hub("valid_org/test-config-org" , use_auth_token=self._token )
        __lowerCAmelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__a , getattr(__a , __a ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="valid_org/test-config-org" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                __a , repo_id="valid_org/test-config-org" , push_to_hub=__a , use_auth_token=self._token )
        __lowerCAmelCase = BertConfig.from_pretrained("valid_org/test-config-org" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(__a , getattr(__a , __a ) )
    def snake_case ( self ):
        # Dynamic (custom-code) config: push, then load back with trust_remote_code.
        CustomConfig.register_for_auto_class()
        __lowerCAmelCase = CustomConfig(attribute=42 )
        config.push_to_hub("test-dynamic-config" , use_auth_token=self._token )
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map , {"AutoConfig": "custom_configuration.CustomConfig"} )
        __lowerCAmelCase = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config" , trust_remote_code=__a )
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ , "CustomConfig" )
        self.assertEqual(new_config.attribute , 42 )
class _UpperCamelCase ( unittest.TestCase ):
    """Unit tests for `PretrainedConfig` behaviour: `update_from_string`, kwarg
    bookkeeping, subfolder loading, offline fallback, legacy URL loading, and
    versioned configuration files.

    NOTE(review): all test methods share the name `snake_case` (later defs
    shadow earlier ones) and locals assigned to `__lowerCAmelCase` are read
    back under other names (`c`, `base_config`, `configuration`, ...) —
    identifiers look scrambled; code preserved byte-for-byte.
    """
    def snake_case ( self ):
        # update_from_string must round-trip int/float/bool/str fields.
        __lowerCAmelCase = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        __lowerCAmelCase = c.n_embd + 1 # int
        __lowerCAmelCase = c.resid_pdrop + 1.0 # float
        __lowerCAmelCase = not c.scale_attn_weights # bool
        __lowerCAmelCase = c.summary_type + "foo" # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}" )
        self.assertEqual(__a , c.n_embd , "mismatch for key: n_embd" )
        self.assertEqual(__a , c.resid_pdrop , "mismatch for key: resid_pdrop" )
        self.assertEqual(__a , c.scale_attn_weights , "mismatch for key: scale_attn_weights" )
        self.assertEqual(__a , c.summary_type , "mismatch for key: summary_type" )
    def snake_case ( self ):
        # Every kwarg PretrainedConfig accepts must appear in
        # config_common_kwargs with a non-default value.
        __lowerCAmelCase = PretrainedConfig()
        __lowerCAmelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to addin config_common_kwargs above.
        self.assertListEqual(
            __a , ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"] )
        __lowerCAmelCase = [key for key, value in config_common_kwargs.items() if value == getattr(__a , __a )]
        if len(__a ) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(__a )}." )
    def snake_case ( self ):
        # Loading a config stored in a repo subfolder requires `subfolder=`.
        with self.assertRaises(__a ):
            # config is in subfolder, the following should not work without specifying the subfolder
            __lowerCAmelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" )
        __lowerCAmelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder" , subfolder="bert" )
        self.assertIsNotNone(__a )
    def snake_case ( self ):
        # A mock response for an HTTP head request to emulate server down
        __lowerCAmelCase = mock.Mock()
        __lowerCAmelCase = 5_00
        __lowerCAmelCase = {}
        __lowerCAmelCase = HTTPError
        __lowerCAmelCase = {}
        # Download this model to make sure it's in the cache.
        __lowerCAmelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request" , return_value=__a ) as mock_head:
            __lowerCAmelCase = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert" )
            # This check we did call the fake head request
            mock_head.assert_called()
    def snake_case ( self ):
        # This test is for deprecated behavior and can be removed in v5
        __lowerCAmelCase = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json" )
    def snake_case ( self ):
        # Versioned config files: a file versioned above the running
        # Transformers version must be ignored in favour of the older one.
        __lowerCAmelCase = AutoConfig.from_pretrained("bert-base-cased" )
        __lowerCAmelCase = ["config.4.0.0.json"]
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(__a )
            __lowerCAmelCase = 2
            json.dump(configuration.to_dict() , open(os.path.join(__a , "config.4.0.0.json" ) , "w" ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertEqual(new_configuration.hidden_size , 2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            __lowerCAmelCase = ["config.42.0.0.json"]
            __lowerCAmelCase = 7_68
            configuration.save_pretrained(__a )
            shutil.move(os.path.join(__a , "config.4.0.0.json" ) , os.path.join(__a , "config.42.0.0.json" ) )
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertEqual(new_configuration.hidden_size , 7_68 )
    def snake_case ( self ):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        __lowerCAmelCase = "hf-internal-testing/test-two-configs"
        import transformers as new_transformers
        __lowerCAmelCase = "v4.0.0"
        __lowerCAmelCase , __lowerCAmelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
            __a , return_unused_kwargs=__a )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` ia not kept in the kwargs by mistake.
        self.assertDictEqual(__a , {} )
        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers
        __lowerCAmelCase = "v3.0.0"
        __lowerCAmelCase = old_transformers.models.auto.AutoConfig.from_pretrained(__a )
        self.assertEqual(old_configuration.hidden_size , 7_68 )
| 57
|
"""simple docstring"""
def _lowerCamelCase(_UpperCamelCase=600851475143):
    """Return the largest prime factor of `_UpperCamelCase` (Project Euler 3).

    Fixes a scrambling defect: every local was assigned to `__lowerCAmelCase`
    while the algorithm reads `n`/`i` — the function raised NameError as
    written.

    Args:
        _UpperCamelCase: a positive integer (or value castable to int).

    Returns:
        int: the largest prime factor of the input.

    Raises:
        TypeError: if the input cannot be cast to int.
        ValueError: if the input is not >= 1.
    """
    try:
        n = int(_UpperCamelCase)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    # Trial division: strip each factor completely before moving on, so each
    # `i` that divides `n` is prime; the last one recorded is the largest.
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name in this module
    # (the function above is `_lowerCamelCase`); confirm the intended callee.
    print(f'''{solution() = }''')
| 57
| 1
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
# Module-level logger for this file.
A : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Seq2seq-specific training arguments layered on top of `TrainingArguments`:
    label smoothing, sortish sampling, generation-based eval metrics, Adafactor,
    dropout overrides copied into `model.config`, and the LR-scheduler choice.

    NOTE(review): every field below is bound to the same name
    (`__UpperCAmelCase`), so only the last declaration survives on the
    dataclass, and the `bool` fields default to the base-class alias
    `lowerCAmelCase__` — the field names/defaults look scrambled; confirm
    against the upstream seq2seq training-arguments file.
    """
    __UpperCAmelCase : Optional[float] =field(
        default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to SortishSamler or not."""} )
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """whether to use adafactor"""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(default=lowerCAmelCase__ ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[str] =field(
        default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Generic tester for model configuration classes: common properties, JSON
    round-trips, save/load (incl. subfolders), num_labels handling, and
    init-time kwarg checks.

    NOTE(review): the check methods all share the name `snake_case`, the
    constructor parameters are all named `__a` (not valid Python), and locals
    assigned to `__lowerCAmelCase` are read back under other names —
    identifiers look scrambled; code preserved byte-for-byte.
    """
    def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
        # NOTE(review): body reads `parent`, `config_class`, `has_text_modality`,
        # `kwargs`, `common_properties` — confirm the intended signature.
        __lowerCAmelCase = parent
        __lowerCAmelCase = config_class
        __lowerCAmelCase = has_text_modality
        __lowerCAmelCase = kwargs
        __lowerCAmelCase = common_properties
    def snake_case ( self ):
        # Checks getters, setters, and ctor kwargs for the common config properties.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(__a ):
            try:
                setattr(__a , __a , __a )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(__a ):
            try:
                __lowerCAmelCase = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def snake_case ( self ):
        # JSON-string round trip.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = json.loads(config.to_json_string() )
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key] , __a )
    def snake_case ( self ):
        # JSON-file round trip.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , "config.json" )
            config_first.to_json_file(__a )
            __lowerCAmelCase = self.config_class.from_json_file(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # save_pretrained / from_pretrained round trip.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # save/load round trip through a subfolder.
        __lowerCAmelCase = self.config_class(**self.inputs_dict )
        __lowerCAmelCase = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCAmelCase = os.path.join(__a , __a )
            config_first.save_pretrained(__a )
            __lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
        self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
    def snake_case ( self ):
        # num_labels must drive the sizes of the id/label mappings.
        __lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
        self.parent.assertEqual(len(config.idalabel ) , 5 )
        self.parent.assertEqual(len(config.labelaid ) , 5 )
        __lowerCAmelCase = 3
        self.parent.assertEqual(len(config.idalabel ) , 3 )
        self.parent.assertEqual(len(config.labelaid ) , 3 )
    def snake_case ( self ):
        # Non-composite configs must be constructible with no arguments.
        if self.config_class.is_composition:
            return
        __lowerCAmelCase = self.config_class()
        self.parent.assertIsNotNone(__a )
    def snake_case ( self ):
        # Every common kwarg passed at init must actually be set on the config.
        __lowerCAmelCase = copy.deepcopy(__a )
        __lowerCAmelCase = self.config_class(**__a )
        __lowerCAmelCase = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch
                    if config.torch_dtype != torch.floataa:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
            elif getattr(__a , __a ) != value:
                wrong_values.append((key, getattr(__a , __a ), value) )
        if len(__a ) > 0:
            __lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
    def snake_case ( self ):
        # Run the full battery of checks above.
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Agent tool that answers an English question about an image using a ViLT
    VQA checkpoint (`dandelin/vilt-b32-finetuned-vqa`)."""
    __UpperCAmelCase : Dict ="""dandelin/vilt-b32-finetuned-vqa"""
    __UpperCAmelCase : str =(
        """This is a tool that answers a question about an image. It takes an input named `image` which should be the """
        """image containing the information, as well as a `question` which should be the question in English. It """
        """returns a text that is the answer to the question."""
    )
    # Tool name, processor/model classes, and the declared input/output modalities.
    __UpperCAmelCase : List[str] ="""image_qa"""
    __UpperCAmelCase : Any =AutoProcessor
    __UpperCAmelCase : int =AutoModelForVisualQuestionAnswering
    __UpperCAmelCase : str =["""image""", """text"""]
    __UpperCAmelCase : Dict =["""text"""]
    def __init__( self , *__a , **__a ):
        # Vision support (PIL) is required to handle the image input.
        requires_backends(self , ["vision"] )
        super().__init__(*__a , **__a )
    def snake_case ( self , __a , __a ):
        # Featurize the (image, question) pair into PyTorch model inputs.
        return self.pre_processor(__a , __a , return_tensors="pt" )
    def snake_case ( self , __a ):
        # Inference only — no gradients needed.
        with torch.no_grad():
            return self.model(**__a ).logits
    def snake_case ( self , __a ):
        # Map the highest-scoring logit to its label string.
        # NOTE(review): `idalabel` / `outputs` / `idx` look like scrambled
        # identifiers (presumably `id2label`, and the method's own argument
        # and local) — confirm against the upstream tool implementation.
        __lowerCAmelCase = outputs.argmax(-1 ).item()
        return self.model.config.idalabel[idx]
| 57
|
"""simple docstring"""
# Pinned dependency table (mangled name `A`): maps a pip package name to the
# version specifier the project installs/checks it with (diffusers-style deps).
A : int = {
    "Pillow": "Pillow",
    "accelerate": "accelerate>=0.11.0",
    "compel": "compel==0.1.8",
    "black": "black~=23.1",
    "datasets": "datasets",
    "filelock": "filelock",
    "flax": "flax>=0.4.1",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.13.2",
    "requests-mock": "requests-mock==1.10.0",
    "importlib_metadata": "importlib_metadata",
    "invisible-watermark": "invisible-watermark",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2",
    "jaxlib": "jaxlib>=0.1.65",
    "Jinja2": "Jinja2",
    "k-diffusion": "k-diffusion>=0.0.12",
    "torchsde": "torchsde",
    "note_seq": "note_seq",
    "librosa": "librosa",
    "numpy": "numpy",
    "omegaconf": "omegaconf",
    "parameterized": "parameterized",
    "protobuf": "protobuf>=3.20.3,<4",
    "pytest": "pytest",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "ruff": "ruff>=0.0.241",
    "safetensors": "safetensors",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "scipy": "scipy",
    "onnx": "onnx",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "tensorboard": "tensorboard",
    "torch": "torch>=1.4",
    "torchvision": "torchvision",
    "transformers": "transformers>=4.25.1",
    "urllib3": "urllib3<=2.0.0",
}
| 57
| 1
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
# Module-level logger (mangled name `A`).
A : int = logging.get_logger(__name__)
# Image-to-text pipeline: image (optionally with a conditioning prompt) in,
# [{"generated_text": ...}] out.
# NOTE(review): automated renaming collapsed distinct locals into
# `__lowerCAmelCase` and method parameters into `__a`, so several branches read
# names that are never bound here (`prompt`, `generate_kwargs`,
# `max_new_tokens`, `forward_kwargs`, `preprocess_params`, ...) — compare with
# the unmangled original before trusting control flow.
@add_end_docstrings(lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    def __init__( self , *__a , **__a ):
        super().__init__(*__a , **__a )
        requires_backends(self , "vision" )
        # Restrict to vision-to-sequence model classes for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def snake_case ( self , __a=None , __a=None , __a=None ):
        # _sanitize_parameters: split call kwargs into preprocess/forward dicts;
        # max_new_tokens may be given directly or inside generate_kwargs, not both.
        __lowerCAmelCase = {}
        __lowerCAmelCase = {}
        if prompt is not None:
            __lowerCAmelCase = prompt
        if generate_kwargs is not None:
            __lowerCAmelCase = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                __lowerCAmelCase = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one" )
            __lowerCAmelCase = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , __a , **__a ):
        return super().__call__(__a , **__a )
    def snake_case ( self , __a , __a=None ):
        # preprocess: load the image and build model inputs; prompt handling is
        # model-type specific (git / pix2struct / generic conditional models).
        __lowerCAmelCase = load_image(__a )
        if prompt is not None:
            if not isinstance(__a , __a ):
                raise ValueError(
                    f"Received an invalid text input, got - {type(__a )} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation." )
            __lowerCAmelCase = self.model.config.model_type
            if model_type == "git":
                # GIT prepends CLS to the prompt ids (no other special tokens).
                __lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
                __lowerCAmelCase = self.tokenizer(text=__a , add_special_tokens=__a ).input_ids
                __lowerCAmelCase = [self.tokenizer.cls_token_id] + input_ids
                __lowerCAmelCase = torch.tensor(__a ).unsqueeze(0 )
                model_inputs.update({"input_ids": input_ids} )
            elif model_type == "pix2struct":
                # Pix2Struct renders the prompt into the image as header text.
                __lowerCAmelCase = self.image_processor(images=__a , header_text=__a , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                __lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
                __lowerCAmelCase = self.tokenizer(__a , return_tensors=self.framework )
                model_inputs.update(__a )
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation" )
        else:
            __lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            # GIT expects input_ids present (as None) even without a prompt.
            __lowerCAmelCase = None
        return model_inputs
    def snake_case ( self , __a , __a=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"] , __a )
            and all(x is None for x in model_inputs["input_ids"] )
        ):
            __lowerCAmelCase = None
        if generate_kwargs is None:
            __lowerCAmelCase = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        __lowerCAmelCase = model_inputs.pop(self.model.main_input_name )
        __lowerCAmelCase = self.model.generate(__a , **__a , **__a )
        return model_outputs
    def snake_case ( self , __a ):
        # postprocess: decode each generated id sequence to plain text.
        __lowerCAmelCase = []
        for output_ids in model_outputs:
            __lowerCAmelCase = {
                "generated_text": self.tokenizer.decode(
                    __a , skip_special_tokens=__a , )
            }
            records.append(__a )
        return records
| 57
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger (mangled name `A`).
A : str = logging.get_logger(__name__)
# Byte-level (ByT5-style) tokenizer: operates directly on UTF-8 bytes, with
# pad/eos/unk specials and optional "<extra_id_N>" sentinel tokens.
# NOTE(review): automated renaming gave every method the name `snake_case`
# (later defs shadow earlier ones on the class) and collapsed distinct locals
# into `__lowerCAmelCase`, so several branches read names that are never bound
# here (`extra_tokens`, `pad_token`, `token_ids`, ...) — compare with the
# unmangled original before trusting behavior.
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    # Model input names expected by downstream models.
    __UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
    def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        # Wrap bare-string specials in AddedToken so stripping behavior is fixed.
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        super().__init__(
            eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = 2**8  # utf is 8 bits
        # define special tokens dict
        __lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCAmelCase = len(self.special_tokens_encoder )
        __lowerCAmelCase = len(__a )
        for i, token in enumerate(__a ):
            # Sentinel ids occupy the top of the vocab range.
            __lowerCAmelCase = self.vocab_size + i - n
        __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def snake_case ( self ):
        # vocab_size: 256 raw bytes + special tokens + sentinel ids.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def snake_case ( self , __a , __a = None , __a = False ):
        # get_special_tokens_mask: 1 marks the eos appended after each sequence.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a )) + [1]
        return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
    def snake_case ( self , __a ):
        # Append eos unless the sequence already ends with it (then only warn).
        if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def snake_case ( self , __a , __a = None ):
        # create_token_type_ids_from_sequences: this model uses no segment ids,
        # so the mask is all zeros over "A</s>" or "A</s>B</s>".
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def snake_case ( self , __a , __a = None ):
        # build_inputs_with_special_tokens: "A</s>" or "A</s>B</s>".
        __lowerCAmelCase = self._add_eos_if_not_present(__a )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(__a )
            return token_ids_a + token_ids_a
    def snake_case ( self , __a ):
        # _tokenize: one token per UTF-8 byte of the input text.
        __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
        return tokens
    def snake_case ( self , __a ):
        # _convert_token_to_id: specials, then added tokens, then single bytes
        # (shifted past the specials); anything else maps to unk.
        if token in self.special_tokens_encoder:
            __lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(__a ) != 1:
            __lowerCAmelCase = self.unk_token_id
        else:
            __lowerCAmelCase = ord(__a ) + self._num_special_tokens
        return token_id
    def snake_case ( self , __a ):
        # _convert_id_to_token: inverse of the byte/special mapping above.
        if index in self.special_tokens_decoder:
            __lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            __lowerCAmelCase = chr(index - self._num_special_tokens )
        return token
    def snake_case ( self , __a ):
        # convert_tokens_to_string: re-assemble raw bytes, decode leniently.
        __lowerCAmelCase = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                # NOTE(review): reads special_tokens_decoder under the
                # added_tokens_decoder guard — possible copy/paste bug; confirm.
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            else:
                __lowerCAmelCase = bytes([ord(__a )] )
            bstring += tok_string
        __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
        return string
    def snake_case ( self , __a , __a = None ):
        # save_vocabulary: a byte tokenizer has no vocab file to write.
        return ()
| 57
| 1
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
'''simple docstring'''
__lowerCAmelCase = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("All input parameters must be positive" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("Relative densities cannot be greater than one" )
else:
__lowerCAmelCase = 1 - (matter_density + radiation_density + dark_energy)
__lowerCAmelCase = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__lowerCAmelCase = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
    import doctest
    # run doctest
    doctest.testmod()
    # demo LCDM approximation
    # NOTE(review): `hubble_parameter` is not defined under that name in this
    # chunk — the function above was mangled to `_lowerCamelCase`; this demo
    # would raise NameError as written.  Confirm against the original source.
    A : Tuple = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
| 57
|
"""simple docstring"""
import numpy
# List of input, output pairs
# Training data: ((feature_1, feature_2, feature_3), target) tuples.
# NOTE(review): every global below was mangled to the same name `A` (each line
# rebinds it), yet the functions that follow refer to `train_data`,
# `test_data`, `parameter_vector`, `m` and `LEARNING_RATE`.  Restore the
# original names for the module to run.
A : Any = (
    ((5, 2, 3), 1_5),
    ((6, 5, 9), 2_5),
    ((1_1, 1_2, 1_3), 4_1),
    ((1, 1, 1), 8),
    ((1_1, 1_2, 1_3), 4_1),
)
# Held-out test data in the same format.
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
# Parameter vector: bias followed by one weight per feature.
A : Union[str, Any] = [2, 4, 1, 5]
# Number of training examples.
A : int = len(train_data)
# Gradient-descent step size.
A : Dict = 0.009
def _lowerCamelCase(example_no, data_set="train"):
    """Return hypothesis(example) - actual output for one example.

    Bug fix: the mangled original declared both parameters with the same name
    `_UpperCamelCase`, which is a SyntaxError in Python.

    NOTE(review): `calculate_hypothesis_value` and `output` are the original
    helper names; in this mangled file every def was renamed to
    `_lowerCamelCase`, so those helpers must also be restored for this to run.
    """
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = 0
for i in range(len(_UpperCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _lowerCamelCase(example_no, data_set):
    """Return the target output of example ``example_no`` from the chosen set.

    Bug fix: the mangled original declared both parameters with the same name
    `_UpperCamelCase` (SyntaxError); the body already read the descriptive
    names restored here.  Returns None for an unknown ``data_set`` label.

    NOTE(review): `train_data`/`test_data` are module globals that this mangled
    file currently binds as `A` — restore their names for this to run.
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def _lowerCamelCase(example_no, data_set):
    """Return the hypothesis value for example ``example_no`` of ``data_set``.

    Bug fix: the mangled original declared both parameters with the same name
    `_UpperCamelCase` (SyntaxError); the body already read the descriptive
    names restored here.  Returns None for an unknown ``data_set`` label.

    NOTE(review): `_hypothesis_value`, `train_data` and `test_data` exist under
    mangled names in this file — restore them for this to run.
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def _lowerCamelCase(index, end=m):
    """Sum cost-derivative contributions over the first ``end`` training examples.

    ``index == -1`` selects the bias term (plain error); otherwise each error is
    weighted by feature ``index`` of the corresponding example.

    Bug fix: the mangled original declared both parameters with the same name
    `_UpperCamelCase` (SyntaxError) and passed that unbound name to `_error`;
    restored to the original ``(index, end=m)`` signature with the loop
    variable ``i`` fed to `_error`.

    NOTE(review): `_error`, `train_data` and `m` exist under mangled names in
    this file — restore them for this to run.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def _lowerCamelCase(index):
    """Return d(cost)/d(theta_index): summed error averaged over m examples.

    Bug fix: the mangled original collapsed the call arguments to
    ``summation_of_cost_derivative(index, index)``, truncating the summation to
    ``index`` examples instead of summing over all ``m`` of them.

    NOTE(review): `summation_of_cost_derivative` and `m` exist under mangled
    names in this file — restore them for this to run.
    """
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
def _lowerCamelCase ( ):
    '''simple docstring'''
    # Batch gradient descent: iterate until the parameter vector converges
    # within the absolute/relative tolerances below.
    # NOTE(review): mangling collapsed every local (`j`, `temp_parameter_vector`,
    # `cost_derivative`, the tolerances) into `__lowerCAmelCase`, so the loop
    # reads names that are never bound and overwrites the candidate vector with
    # a scalar each iteration — compare with the unmangled original.
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    __lowerCAmelCase = 0.00_00_02
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    while True:
        j += 1
        __lowerCAmelCase = [0, 0, 0, 0]
        for i in range(0 , len(_UpperCamelCase ) ):
            __lowerCAmelCase = get_cost_derivative(i - 1 )
            __lowerCAmelCase = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            _UpperCamelCase , _UpperCamelCase , atol=_UpperCamelCase , rtol=_UpperCamelCase , ):
            break
        __lowerCAmelCase = temp_parameter_vector
    print(("Number of iterations:", j) )
def _lowerCamelCase ( ):
    '''simple docstring'''
    # Print actual vs. hypothesis output for every test example.
    # NOTE(review): `_UpperCamelCase` is unbound inside this zero-argument
    # function (originally `range(len(test_data))` with loop index `i`), and
    # `output`/`calculate_hypothesis_value` were mangled away — restore the
    # original names for this to run.
    for i in range(len(_UpperCamelCase ) ):
        print(("Actual output value:", output(_UpperCamelCase , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(_UpperCamelCase , "test" )) )
if __name__ == "__main__":
    # NOTE(review): `run_gradient_descent`/`test_gradient_descent` are not
    # defined under these names in this mangled chunk (all defs are
    # `_lowerCamelCase`); this demo raises NameError as written.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 57
| 1
|
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
# Module-level logger (mangled name `A`).
A : Any = logging.get_logger(__name__)
# Depth-estimation pipeline: image in, {"predicted_depth": tensor,
# "depth": PIL image} out.
# NOTE(review): mangling collapsed distinct locals into `__lowerCAmelCase` —
# e.g. `image.size` is stored into a local in preprocess but read back as
# `self.image_size` in postprocess — and every hook method shares the name
# `snake_case`, so later defs shadow earlier ones.  Compare with the original.
@add_end_docstrings(lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    def __init__( self , *__a , **__a ):
        super().__init__(*__a , **__a )
        requires_backends(self , "vision" )
        self.check_model_type(__a )
    def __call__( self , __a , **__a ):
        return super().__call__(__a , **__a )
    def snake_case ( self , **__a ):
        # _sanitize_parameters: this pipeline exposes no tunable parameters.
        return {}, {}, {}
    def snake_case ( self , __a ):
        # preprocess: load the image and run the image processor.
        __lowerCAmelCase = load_image(__a )
        __lowerCAmelCase = image.size
        __lowerCAmelCase = self.image_processor(images=__a , return_tensors=self.framework )
        return model_inputs
    def snake_case ( self , __a ):
        # _forward: plain model call.
        __lowerCAmelCase = self.model(**__a )
        return model_outputs
    def snake_case ( self , __a ):
        # postprocess: upsample the depth map back to the input resolution,
        # then scale it to 0-255 uint8 for a viewable grayscale image.
        __lowerCAmelCase = model_outputs.predicted_depth
        __lowerCAmelCase = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__a )
        __lowerCAmelCase = prediction.squeeze().cpu().numpy()
        __lowerCAmelCase = (output * 2_55 / np.max(__a )).astype("uint8" )
        __lowerCAmelCase = Image.fromarray(__a )
        __lowerCAmelCase = {}
        __lowerCAmelCase = predicted_depth
        __lowerCAmelCase = depth
        return output_dict
| 57
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
# Unit tests for ChineseCLIPProcessor: save/load round-trips, kwarg overrides,
# tokenizer/image-processor parity, batch decode, and model_input_names.
# NOTE(review): mangling gave many test methods the same name `snake_case`, so
# later definitions shadow earlier ones and only the last would actually run
# under unittest; locals were likewise collapsed into `__lowerCAmelCase`, so
# several methods read attributes (`self.vocab_file`, ...) that setUp never
# binds as written.  Compare with the unmangled original.
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    '''simple docstring'''
    def snake_case ( self ):
        # setUp: write a tiny vocab and an image-processor config to a tmp dir.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )
    def snake_case ( self , **__a ):
        # Helper: slow tokenizer bound to the tmp vocab.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Helper: fast tokenizer bound to the tmp vocab.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self , **__a ):
        # Helper: image processor built from the tmp config.
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )
    def snake_case ( self ):
        # tearDown: remove the tmp dir.
        shutil.rmtree(self.tmpdirname )
    def snake_case ( self ):
        # Helper: one random 30x400 RGB PIL image.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def snake_case ( self ):
        # save_pretrained/from_pretrained round-trip for slow and fast variants.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )
    def snake_case ( self ):
        # from_pretrained with extra kwargs overrides tokenizer/image-processor
        # settings on load.
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )
    def snake_case ( self ):
        # Processor(images=...) matches calling the image processor directly.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def snake_case ( self ):
        # Processor(text=...) matches calling the tokenizer directly.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def snake_case ( self ):
        # text+images produces the union of keys; no input at all must raise.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()
    def snake_case ( self ):
        # batch_decode is forwarded verbatim to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )
    def snake_case ( self ):
        # Output keys match the processor's advertised model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
# CLIP-style processor: bundles a CLIPImageProcessor and a CLIPTokenizer(Fast)
# behind one object, with deprecation shims for the old `feature_extractor` API.
# NOTE(review): mangling collapsed distinct locals into `__lowerCAmelCase`, so
# `__call__` returns the unbound name `encoding` and reads `image_features`
# that was never assigned under that name; several methods also share the name
# `snake_case`, so later defs shadow earlier ones.  Compare with the original.
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    __UpperCAmelCase : List[str] =["""image_processor""", """tokenizer"""]
    __UpperCAmelCase : str ="""CLIPImageProcessor"""
    __UpperCAmelCase : str =("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , __a=None , __a=None , **__a ):
        __lowerCAmelCase = None
        if "feature_extractor" in kwargs:
            # Legacy keyword: accept it once, with a deprecation warning.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , __a , )
            __lowerCAmelCase = kwargs.pop("feature_extractor" )
        __lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(__a , __a )
    def __call__( self , __a=None , __a=None , __a=None , **__a ):
        # Tokenize text and/or process images; when both are given, attach
        # pixel_values to the text encoding.
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            __lowerCAmelCase = self.tokenizer(__a , return_tensors=__a , **__a )
        if images is not None:
            __lowerCAmelCase = self.image_processor(__a , return_tensors=__a , **__a )
        if text is not None and images is not None:
            __lowerCAmelCase = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
    def snake_case ( self , *__a , **__a ):
        # batch_decode: forwarded verbatim to the tokenizer.
        return self.tokenizer.batch_decode(*__a , **__a )
    def snake_case ( self , *__a , **__a ):
        # decode: forwarded verbatim to the tokenizer.
        return self.tokenizer.decode(*__a , **__a )
    @property
    def snake_case ( self ):
        # model_input_names: tokenizer names first, de-duplicated, order kept.
        __lowerCAmelCase = self.tokenizer.model_input_names
        __lowerCAmelCase = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def snake_case ( self ):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
        return self.image_processor_class
    @property
    def snake_case ( self ):
        # Deprecated alias for image_processor.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
        return self.image_processor
| 57
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( _UpperCamelCase = 4 ):
'''simple docstring'''
__lowerCAmelCase = abs(_UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(_UpperCamelCase )] for y in range(_UpperCamelCase )]
def _lowerCamelCase ( _UpperCamelCase ):
    '''simple docstring'''
    # Rotate 90 degrees counterclockwise: transpose, then reverse row order.
    # NOTE(review): `reverse_row`/`transpose` are not bound under these names in
    # this mangled chunk (every def here is `_lowerCamelCase`) — NameError as
    # written; restore the helper names for this to run.
    return reverse_row(transpose(_UpperCamelCase ) )
    # OR.. transpose(reverse_column(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
    '''simple docstring'''
    # Rotate 180 degrees: reverse row order, then reverse each row.
    # NOTE(review): `reverse_row`/`reverse_column` are not bound under these
    # names in this mangled chunk — NameError as written; restore the helper
    # names for this to run.
    return reverse_row(reverse_column(_UpperCamelCase ) )
    # OR.. reverse_column(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
    '''simple docstring'''
    # Rotate 270 degrees counterclockwise: transpose, then reverse each row.
    # NOTE(review): `reverse_column`/`transpose` are not bound under these names
    # in this mangled chunk — NameError as written; restore the helper names
    # for this to run.
    return reverse_column(transpose(_UpperCamelCase ) )
    # OR.. transpose(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [list(_UpperCamelCase ) for x in zip(*_UpperCamelCase )]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = matrix[::-1]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [x[::-1] for x in matrix]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for i in matrix:
print(*_UpperCamelCase )
if __name__ == "__main__":
    # Demo: build a matrix and show 90/180/270-degree counterclockwise rotations.
    # NOTE(review): `make_matrix`, `print_matrix`, `rotate_aa` and `rotate_aaa`
    # are not bound under these names in this mangled chunk (all defs are
    # `_lowerCamelCase`), and `rotate_aaa` is used for both the 180- and
    # 270-degree rotations — NameError as written; restore the original names.
    A : Dict = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    A : List[str] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    A : str = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 57
| 1
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
# Module-level logger (mangled name `A`).
A : Union[str, Any] = logging.getLogger(__name__)
# PyTorch-Lightning module for GLUE sequence classification, built on the
# example repo's BaseTransformer.
# NOTE(review): mangling gave every hook method the name `snake_case` (later
# defs shadow earlier ones on the class) and collapsed distinct locals into
# `__lowerCAmelCase`, so many statements read names that are never bound
# (`hparams`, `inputs`, `loss`, `preds`, ...) — compare with the original.
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''simple docstring'''
    __UpperCAmelCase : Optional[Any] ="""sequence-classification"""
    def __init__( self , __a ):
        # Accept a plain dict of hparams and promote it to a Namespace.
        if type(__a ) == dict:
            __lowerCAmelCase = Namespace(**__a )
        __lowerCAmelCase = glue_output_modes[hparams.task]
        __lowerCAmelCase = glue_tasks_num_labels[hparams.task]
        super().__init__(__a , __a , self.mode )
    def snake_case ( self , **__a ):
        # forward: delegate to the wrapped transformers model.
        return self.model(**__a )
    def snake_case ( self , __a , __a ):
        # training_step: build model inputs from the batch, log loss and lr.
        __lowerCAmelCase = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            # Only some architectures consume token_type_ids.
            __lowerCAmelCase = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        __lowerCAmelCase = self(**__a )
        __lowerCAmelCase = outputs[0]
        __lowerCAmelCase = self.trainer.lr_schedulers[0]["scheduler"]
        __lowerCAmelCase = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def snake_case ( self ):
        # prepare_data: featurize train/dev splits once and cache to disk.
        __lowerCAmelCase = self.hparams
        __lowerCAmelCase = processors[args.task]()
        __lowerCAmelCase = processor.get_labels()
        for mode in ["train", "dev"]:
            __lowerCAmelCase = self._feature_file(__a )
            if os.path.exists(__a ) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s" , __a )
            else:
                logger.info("Creating features from dataset file at %s" , args.data_dir )
                __lowerCAmelCase = (
                    processor.get_dev_examples(args.data_dir )
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir )
                )
                __lowerCAmelCase = convert_examples_to_features(
                    __a , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
                logger.info("Saving features into cached file %s" , __a )
                torch.save(__a , __a )
    def snake_case ( self , __a , __a , __a = False ):
        # get_dataloader: load the cached features for the split into tensors.
        __lowerCAmelCase = "dev" if mode == "test" else mode
        __lowerCAmelCase = self._feature_file(__a )
        logger.info("Loading features from cached file %s" , __a )
        __lowerCAmelCase = torch.load(__a )
        __lowerCAmelCase = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
        __lowerCAmelCase = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
        __lowerCAmelCase = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
        if self.hparams.glue_output_mode == "classification":
            __lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.long )
        elif self.hparams.glue_output_mode == "regression":
            __lowerCAmelCase = torch.tensor([f.label for f in features] , dtype=torch.float )
        return DataLoader(
            TensorDataset(__a , __a , __a , __a ) , batch_size=__a , shuffle=__a , )
    def snake_case ( self , __a , __a ):
        # validation_step: forward pass, collect loss, logits, and gold labels.
        __lowerCAmelCase = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            __lowerCAmelCase = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        __lowerCAmelCase = self(**__a )
        __lowerCAmelCase , __lowerCAmelCase = outputs[:2]
        __lowerCAmelCase = logits.detach().cpu().numpy()
        __lowerCAmelCase = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def snake_case ( self , __a ):
        # _eval_end: aggregate step outputs into metrics + per-example lists.
        __lowerCAmelCase = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
        __lowerCAmelCase = np.concatenate([x["pred"] for x in outputs] , axis=0 )
        if self.hparams.glue_output_mode == "classification":
            __lowerCAmelCase = np.argmax(__a , axis=1 )
        elif self.hparams.glue_output_mode == "regression":
            __lowerCAmelCase = np.squeeze(__a )
        __lowerCAmelCase = np.concatenate([x["target"] for x in outputs] , axis=0 )
        __lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0] )]
        __lowerCAmelCase = [[] for _ in range(out_label_ids.shape[0] )]
        __lowerCAmelCase = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , __a , __a )}
        __lowerCAmelCase = dict(results.items() )
        __lowerCAmelCase = results
        return ret, preds_list, out_label_list
    def snake_case ( self , __a ):
        # validation_epoch_end: surface the aggregated logs.
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._eval_end(__a )
        __lowerCAmelCase = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def snake_case ( self , __a ):
        # test_epoch_end: same aggregation, renamed loss key.
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = self._eval_end(__a )
        __lowerCAmelCase = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def snake_case ( __a , __a ):
        # add_model_specific_args: extend the shared CLI with GLUE options.
        BaseTransformer.add_model_specific_args(__a , __a )
        parser.add_argument(
            "--max_seq_length" , default=1_28 , type=__a , help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ) , )
        parser.add_argument(
            "--task" , default="" , type=__a , required=__a , help="The GLUE task to run" , )
        parser.add_argument(
            "--gpus" , default=0 , type=__a , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
        parser.add_argument(
            "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
        return parser
def _lowerCamelCase ( ):
    '''simple docstring'''
    # Script entry point: parse CLI args, fine-tune on a GLUE task, and
    # optionally run prediction on the dev set from the last checkpoint.
    # NOTE(review): mangling collapsed locals into `__lowerCAmelCase`, so
    # `add_generic_args` receives the unbound name `_UpperCamelCase` (originally
    # the parser) and later calls read `args`/`model`/`trainer` that are never
    # bound as written — compare with the unmangled original.
    __lowerCAmelCase = argparse.ArgumentParser()
    add_generic_args(_UpperCamelCase , os.getcwd() )
    __lowerCAmelCase = GLUETransformer.add_model_specific_args(_UpperCamelCase , os.getcwd() )
    __lowerCAmelCase = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        __lowerCAmelCase = os.path.join(
            "./results" , f"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
        os.makedirs(args.output_dir )
    __lowerCAmelCase = GLUETransformer(_UpperCamelCase )
    __lowerCAmelCase = generic_train(_UpperCamelCase , _UpperCamelCase )
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        __lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_UpperCamelCase ) )
        __lowerCAmelCase = model.load_from_checkpoint(checkpoints[-1] )
        return trainer.test(_UpperCamelCase )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this chunk — the entry point above
    # was mangled to `_lowerCamelCase`; this raises NameError as written.
    main()
| 57
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
return generator, ["Something to write", "Something else"]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = generator("Something there" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
__lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
__lowerCAmelCase = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
with self.assertRaises(__a ):
generator(4 )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
__lowerCAmelCase = 3
__lowerCAmelCase = generator(
"Something there" , num_return_sequences=__a , num_beams=__a , )
__lowerCAmelCase = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(__a , __a )
__lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
__lowerCAmelCase = generator.model.config.eos_token_id
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = generator(
["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
@require_tf
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
| 57
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 57
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCAmelCase = model
__lowerCAmelCase = 2
__lowerCAmelCase = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case ( self ):
pass
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = LongformerModel.from_pretrained(_UpperCamelCase )
__lowerCAmelCase = LightningModel(_UpperCamelCase )
__lowerCAmelCase = torch.load(_UpperCamelCase , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
__lowerCAmelCase = LongformerForQuestionAnswering.from_pretrained(_UpperCamelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCamelCase )
print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
A : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
A : Union[str, Any] = [8, 5, 9, 7]
A : Dict = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A : Optional[Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a , __a , ):
__lowerCAmelCase = claim_vector
__lowerCAmelCase = allocated_resources_table
__lowerCAmelCase = maximum_claim_table
def snake_case ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def snake_case ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def snake_case ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def snake_case ( self ):
return {self.__need().index(__a ): i for i in self.__need()}
def snake_case ( self , **__a ):
__lowerCAmelCase = self.__need()
__lowerCAmelCase = self.__allocated_resources_table
__lowerCAmelCase = self.__available_resources()
__lowerCAmelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
__lowerCAmelCase = False
for each_need in need_list:
__lowerCAmelCase = True
for index, need in enumerate(__a ):
if need > available_resources[index]:
__lowerCAmelCase = False
break
if execution:
__lowerCAmelCase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowerCAmelCase = original_need_index
print(f"Process {process_number + 1} is executing." )
# remove the process run from stack
need_list.remove(__a )
# update available/freed resources stack
__lowerCAmelCase = np.array(__a ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__a ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def snake_case ( self ):
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"P{self.__allocated_resources_table.index(__a ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"P{self.__maximum_claim_table.index(__a ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__a ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__a ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
print("Enter list to be sorted")
A : Union[str, Any] = [int(x) for x in input().split()]
# inputing elements of the list in one line
A : str = odd_even_sort(input_list)
print("The sorted list is")
print(sorted_list)
| 57
| 1
|
"""simple docstring"""
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
pass
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
pass
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = [
[],
[],
[],
]
def snake_case ( self , __a , __a ):
try:
if len(self.queues[priority] ) >= 1_00:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(__a )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self ):
return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
def snake_case ( self , __a ):
if len(self.queue ) == 1_00:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(__a )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
__lowerCAmelCase = min(self.queue )
self.queue.remove(__a )
return data
def __str__( self ):
return str(self.queue )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(_UpperCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_UpperCamelCase )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_UpperCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_UpperCamelCase )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 57
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""image_processor""", """tokenizer"""]
__UpperCAmelCase : Optional[Any] ="""CLIPImageProcessor"""
__UpperCAmelCase : Union[str, Any] =("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self , __a=None , __a=None , **__a ):
__lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
__lowerCAmelCase = kwargs.pop("feature_extractor" )
__lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self , __a=None , __a=None , __a=None , **__a ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__lowerCAmelCase = self.tokenizer(__a , return_tensors=__a , **__a )
if images is not None:
__lowerCAmelCase = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
__lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@property
def snake_case ( self ):
__lowerCAmelCase = self.tokenizer.model_input_names
__lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 57
| 1
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""input_ids""", """attention_mask"""]
def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
" extra_ids tokens" )
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
__lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
super().__init__(
eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
__lowerCAmelCase = extra_ids
__lowerCAmelCase = 2**8 # utf is 8 bits
# define special tokens dict
__lowerCAmelCase = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
__lowerCAmelCase = len(self.special_tokens_encoder )
__lowerCAmelCase = len(__a )
for i, token in enumerate(__a ):
__lowerCAmelCase = self.vocab_size + i - n
__lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def snake_case ( self ):
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def snake_case ( self , __a , __a = None , __a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__a )) + [1]
return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
def snake_case ( self , __a ):
if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def snake_case ( self , __a , __a = None ):
__lowerCAmelCase = self._add_eos_if_not_present(__a )
if token_ids_a is None:
return token_ids_a
else:
__lowerCAmelCase = self._add_eos_if_not_present(__a )
return token_ids_a + token_ids_a
def snake_case ( self , __a ):
__lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
return tokens
def snake_case ( self , __a ):
if token in self.special_tokens_encoder:
__lowerCAmelCase = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
__lowerCAmelCase = self.added_tokens_encoder[token]
elif len(__a ) != 1:
__lowerCAmelCase = self.unk_token_id
else:
__lowerCAmelCase = ord(__a ) + self._num_special_tokens
return token_id
def snake_case ( self , __a ):
if index in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[index]
else:
__lowerCAmelCase = chr(index - self._num_special_tokens )
return token
def snake_case ( self , __a ):
__lowerCAmelCase = B""
for token in tokens:
if token in self.special_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.added_tokens_decoder:
__lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
elif token in self.special_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
elif token in self.added_tokens_encoder:
__lowerCAmelCase = token.encode("utf-8" )
else:
__lowerCAmelCase = bytes([ord(__a )] )
bstring += tok_string
__lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
return string
def snake_case ( self , __a , __a = None ):
return ()
| 57
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase :
'''simple docstring'''
pass
| 57
| 1
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
A : Union[str, Any] = "pt"
elif is_tf_available():
A : Optional[Any] = "tf"
else:
A : Any = "jax"
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str =PerceiverTokenizer
__UpperCAmelCase : List[str] =False
def snake_case ( self ):
super().setUp()
__lowerCAmelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def snake_case ( self ):
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver" )
def snake_case ( self , **__a ):
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , __a , __a=False , __a=20 , __a=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__lowerCAmelCase = []
for i in range(len(__a ) ):
try:
__lowerCAmelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=__a )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__lowerCAmelCase = list(filter(lambda __a : re.match(R"^[ a-zA-Z]+$" , t[1] ) , __a ) )
__lowerCAmelCase = list(filter(lambda __a : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=__a ) , __a ) )
if max_length is not None and len(__a ) > max_length:
__lowerCAmelCase = toks[:max_length]
if min_length is not None and len(__a ) < min_length and len(__a ) > 0:
while len(__a ) < min_length:
__lowerCAmelCase = toks + toks
# toks_str = [t[1] for t in toks]
__lowerCAmelCase = [t[0] for t in toks]
# Ensure consistency
__lowerCAmelCase = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
if " " not in output_txt and len(__a ) > 1:
__lowerCAmelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=__a )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=__a )
)
if with_prefix_space:
__lowerCAmelCase = " " + output_txt
__lowerCAmelCase = tokenizer.encode(__a , add_special_tokens=__a )
return output_txt, output_ids
def snake_case ( self ):
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = "Unicode €."
__lowerCAmelCase = tokenizer(__a )
__lowerCAmelCase = [4, 91, 1_16, 1_11, 1_05, 1_17, 1_06, 1_07, 38, 2_32, 1_36, 1_78, 52, 5]
self.assertEqual(encoded["input_ids"] , __a )
# decoding
__lowerCAmelCase = tokenizer.decode(__a )
self.assertEqual(__a , "[CLS]Unicode €.[SEP]" )
__lowerCAmelCase = tokenizer("e è é ê ë" )
__lowerCAmelCase = [4, 1_07, 38, 2_01, 1_74, 38, 2_01, 1_75, 38, 2_01, 1_76, 38, 2_01, 1_77, 5]
self.assertEqual(encoded["input_ids"] , __a )
# decoding
__lowerCAmelCase = tokenizer.decode(__a )
self.assertEqual(__a , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def snake_case ( self ):
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
__lowerCAmelCase = [4, 71, 38, 1_14, 1_17, 1_16, 1_09, 38, 1_18, 1_03, 1_20, 1_03, 1_09, 1_20, 1_03, 1_18, 1_10, 38, 1_08, 1_17, 1_20, 38, 1_21, 1_23, 1_15, 1_15, 1_03, 1_20, 1_11, 1_28, 1_03, 1_22, 1_11, 1_17, 1_16, 52, 5, 0]
# fmt: on
__lowerCAmelCase = tokenizer(__a , padding=__a , return_tensors=__a )
self.assertIsInstance(__a , __a )
if FRAMEWORK != "jax":
__lowerCAmelCase = list(batch.input_ids.numpy()[0] )
else:
__lowerCAmelCase = list(batch.input_ids.tolist()[0] )
self.assertListEqual(__a , __a )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def snake_case ( self ):
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__lowerCAmelCase = tokenizer(__a , padding=__a , return_tensors=__a )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , __a )
self.assertIn("attention_mask" , __a )
self.assertNotIn("decoder_input_ids" , __a )
self.assertNotIn("decoder_attention_mask" , __a )
def snake_case ( self ):
__lowerCAmelCase = self.perceiver_tokenizer
__lowerCAmelCase = [
"Summary of the text.",
"Another summary.",
]
__lowerCAmelCase = tokenizer(
text_target=__a , max_length=32 , padding="max_length" , truncation=__a , return_tensors=__a )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def snake_case ( self ):
# safety check on max_len default value so we are sure the test works
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = " He is very happy, UNwant\u00E9d,running"
__lowerCAmelCase = tokenizer.encode(__a , add_special_tokens=__a )
tokenizer.save_pretrained(__a )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(__a )
__lowerCAmelCase = after_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
shutil.rmtree(__a )
__lowerCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
__lowerCAmelCase = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
__lowerCAmelCase = tokenizer.encode(__a , add_special_tokens=__a )
tokenizer.save_pretrained(__a )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(__a )
__lowerCAmelCase = after_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__lowerCAmelCase = tokenizer.__class__.from_pretrained(__a , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(__a )
def snake_case ( self ):
__lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(__a )
with open(os.path.join(__a , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
__lowerCAmelCase = json.load(__a )
with open(os.path.join(__a , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
__lowerCAmelCase = json.load(__a )
__lowerCAmelCase = [f"<extra_id_{i}>" for i in range(1_25 )]
__lowerCAmelCase = added_tokens_extra_ids + [
"an_additional_special_token"
]
__lowerCAmelCase = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(__a , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__a , __a )
with open(os.path.join(__a , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(__a , __a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowerCAmelCase = tokenizer_class.from_pretrained(
__a , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowerCAmelCase = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=__a )]
__lowerCAmelCase = tokenizer_class.from_pretrained(
__a , additional_special_tokens=__a , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def snake_case ( self ):
__lowerCAmelCase = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([1_78] ) , "�" )
def snake_case ( self ):
pass
def snake_case ( self ):
pass
def snake_case ( self ):
pass
def snake_case ( self ):
pass
def snake_case ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
__lowerCAmelCase = self.get_tokenizers(fast=__a , do_lower_case=__a )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowerCAmelCase = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
__lowerCAmelCase = tokenizer.convert_tokens_to_string(__a )
self.assertIsInstance(__a , __a )
| 57
|
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
def snake_case ( self , __a ):
return self.node_position[vertex]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = pos
def snake_case ( self , __a , __a , __a , __a ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCAmelCase = 2 * start + 1
else:
__lowerCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCAmelCase , __lowerCAmelCase = heap[smallest_child], positions[smallest_child]
__lowerCAmelCase , __lowerCAmelCase = (
heap[start],
positions[start],
)
__lowerCAmelCase , __lowerCAmelCase = temp, tempa
__lowerCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __a )
self.top_to_bottom(__a , __a , __a , __a )
def snake_case ( self , __a , __a , __a , __a ):
__lowerCAmelCase = position[index]
while index != 0:
__lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCAmelCase = heap[parent]
__lowerCAmelCase = position[parent]
self.set_position(position[parent] , __a )
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , __a )
break
__lowerCAmelCase = parent
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , 0 )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = len(__a ) // 2 - 1
for i in range(__a , -1 , -1 ):
self.top_to_bottom(__a , __a , len(__a ) , __a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = positions[0]
__lowerCAmelCase = sys.maxsize
self.top_to_bottom(__a , 0 , len(__a ) , __a )
return temp
def _lowerCamelCase(adjacency_list):
    """Prim's minimum-spanning-tree algorithm.

    *adjacency_list* maps each vertex ``0..n-1`` to ``[neighbor, weight]``
    pairs.  Returns the tree as a list of ``(parent, vertex)`` edges rooted
    at vertex 0.

    Fix: every local here was collapsed to ``__lowerCAmelCase`` by the
    mangling while the reads kept the original names (``visited``,
    ``distance_tv``, ``positions``, ``nbr_tv``, ``tree_edges``), which made
    the function raise NameError — the bindings are restored.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


# The __main__ driver below calls this under the upstream name.
prisms_algorithm = _lowerCamelCase
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # fix: the input results were mangled to ``A`` while the loop below reads
    # ``edges_number``/``edge``/``adjacency_list`` (NameError); the bogus
    # ``Optional``/``str`` annotations (typing is not imported here) dropped.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Fake configuration for a "new-model" architecture.

    Used by the registration tests below to exercise ``AutoConfig.register``
    with a model type the library does not know about.  NOTE(review): the
    base-class name ``lowerCAmelCase__`` looks machine-mangled (presumably a
    ``PretrainedConfig`` subclass) — confirm against the original module.
    """

    # Registry key for this fake architecture (``model_type`` equivalent).
    __UpperCAmelCase : Tuple ="""new-model"""
if is_tf_available():
    class _UpperCamelCase ( lowerCAmelCase__ ):
        """Fake TF model bound to the "new-model" configuration above.

        Only defined when TensorFlow is importable.  NOTE(review): both the
        base-class name ``lowerCAmelCase__`` and the ``NewModelConfig``
        reference look mangled — the config class above is not bound under
        that name in this file; verify against the original test module.
        """

        # Configuration class associated with this fake TF model.
        __UpperCAmelCase : Optional[int] =NewModelConfig
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
    """Integration tests for the TF ``Auto*`` factory classes.

    NOTE(review): local names in this class look machine-mangled — every
    result is assigned to ``__lowerCAmelCase`` but then read back as ``__a``
    (and sometimes ``model``/``auto_classes``/``tiny_config``), so most
    methods raise NameError as written.  Code is kept byte-for-byte; only
    comments/docstrings were added.  The original intent per method is noted
    below so the bindings can be restored.
    """

    @slow
    def snake_case ( self ):
        # AutoConfig + TFAutoModel round-trip for a plain BERT checkpoint.
        __lowerCAmelCase = "bert-base-cased"
        __lowerCAmelCase = AutoConfig.from_pretrained(__a )
        self.assertIsNotNone(__a )
        self.assertIsInstance(__a , __a )
        __lowerCAmelCase = TFAutoModel.from_pretrained(__a )
        self.assertIsNotNone(__a )
        self.assertIsInstance(__a , __a )

    @slow
    def snake_case ( self ):
        # Same round-trip through the pre-training head factory.
        __lowerCAmelCase = "bert-base-cased"
        __lowerCAmelCase = AutoConfig.from_pretrained(__a )
        self.assertIsNotNone(__a )
        self.assertIsInstance(__a , __a )
        __lowerCAmelCase = TFAutoModelForPreTraining.from_pretrained(__a )
        self.assertIsNotNone(__a )
        self.assertIsInstance(__a , __a )

    @slow
    def snake_case ( self ):
        # Causal-LM factory over the first GPT-2 archive entry, with and
        # without ``output_loading_info``.
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )
            __lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(__a )
            __lowerCAmelCase , __lowerCAmelCase = TFAutoModelForCausalLM.from_pretrained(__a , output_loading_info=__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )

    @slow
    def snake_case ( self ):
        # Legacy LM-head factory over the first BERT archive entry.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )
            __lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )

    @slow
    def snake_case ( self ):
        # Masked-LM factory, with and without loading info.
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )
            __lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(__a )
            __lowerCAmelCase , __lowerCAmelCase = TFAutoModelForMaskedLM.from_pretrained(__a , output_loading_info=__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )

    @slow
    def snake_case ( self ):
        # Seq2seq-LM factory over the first T5 archive entry.
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )
            __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(__a )
            __lowerCAmelCase , __lowerCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(__a , output_loading_info=__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )

    @slow
    def snake_case ( self ):
        # Sequence-classification factory on a hard-coded checkpoint name.
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )
            __lowerCAmelCase = TFAutoModelForSequenceClassification.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )

    @slow
    def snake_case ( self ):
        # Question-answering factory on a hard-coded checkpoint name.
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )
            __lowerCAmelCase = TFAutoModelForQuestionAnswering.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )

    @slow
    @require_tensorflow_probability
    def snake_case ( self ):
        # Table-QA factory (TAPAS) needs tensorflow-probability.
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            __lowerCAmelCase = AutoConfig.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )
            __lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(__a )
            __lowerCAmelCase , __lowerCAmelCase = TFAutoModelForTableQuestionAnswering.from_pretrained(
                __a , output_loading_info=__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , __a )

    def snake_case ( self ):
        # Parameter counting on a tiny checkpoint (presumably
        # SMALL_MODEL_IDENTIFIER — the argument name here is mangled).
        __lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(__a )
        self.assertIsInstance(__a , __a )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_44_10 )

    def snake_case ( self ):
        # Same parameter-count check for the second tiny checkpoint variant.
        __lowerCAmelCase = TFAutoModelWithLMHead.from_pretrained(__a )
        self.assertIsInstance(__a , __a )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=__a ) , 1_44_10 )

    def snake_case ( self ):
        # For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
        __lowerCAmelCase = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny" )
        self.assertIsInstance(__a , __a )
        __lowerCAmelCase = copy.deepcopy(model.config )
        __lowerCAmelCase = ["FunnelBaseModel"]
        __lowerCAmelCase = TFAutoModel.from_config(__a )
        self.assertIsInstance(__a , __a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(__a )
            __lowerCAmelCase = TFAutoModel.from_pretrained(__a )
            self.assertIsInstance(__a , __a )

    def snake_case ( self ):
        # Register the fake "new-model" config/model pair with every auto
        # class, then clean the registries up in ``finally`` so other tests
        # are unaffected.
        try:
            AutoConfig.register("new-model" , __a )
            __lowerCAmelCase = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]
            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__ ):
                    # Wrong config class will raise an error
                    with self.assertRaises(__a ):
                        auto_class.register(__a , __a )
                    auto_class.register(__a , __a )
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(__a ):
                        auto_class.register(__a , __a )
                    # Now that the config is registered, it can be used as any other config with the auto-API
                    __lowerCAmelCase = BertModelTester(self ).get_config()
                    __lowerCAmelCase = NewModelConfig(**tiny_config.to_dict() )
                    __lowerCAmelCase = auto_class.from_config(__a )
                    self.assertIsInstance(__a , __a )
                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(__a )
                        __lowerCAmelCase = auto_class.from_pretrained(__a )
                        self.assertIsInstance(__a , __a )
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]

    def snake_case ( self ):
        # Helpful error for a non-existent repo id.
        with self.assertRaisesRegex(
            __a , "bert-base is not a local folder and is not a valid model identifier" ):
            __lowerCAmelCase = TFAutoModel.from_pretrained("bert-base" )

    def snake_case ( self ):
        # Helpful error for a non-existent revision.
        with self.assertRaisesRegex(
            __a , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            __lowerCAmelCase = TFAutoModel.from_pretrained(__a , revision="aaaaaa" )

    def snake_case ( self ):
        # Helpful error when the repo has no TF/PyTorch weights file.
        with self.assertRaisesRegex(
            __a , "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin" , ):
            __lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model" )

    def snake_case ( self ):
        # Helpful error when only PyTorch weights exist and from_pt is unset.
        with self.assertRaisesRegex(__a , "Use `from_pt=True` to load this model" ):
            __lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )

    def snake_case ( self ):
        # A second load of a cached checkpoint should only issue HEAD
        # requests (cache revalidation), never GETs.
        # Make sure we have cached the model.
        __lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
        with RequestCounter() as counter:
            __lowerCAmelCase = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert" )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
        # With a sharded checkpoint
        __lowerCAmelCase = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
        with RequestCounter() as counter:
            __lowerCAmelCase = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded" )
        self.assertEqual(counter.get_request_count , 0 )
        self.assertEqual(counter.head_request_count , 1 )
        self.assertEqual(counter.other_request_count , 0 )
| 57
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)
# (old, new) substring pairs applied in order to every original checkpoint
# key when translating it to the Hugging Face VisualBERT naming scheme.
A : Tuple = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]
# fix: the conversion helpers below reference ``rename_keys_prefix``, which
# the mangled rename to ``A`` left undefined — rebind the expected name.
rename_keys_prefix = A
# File names of the original VisualBERT checkpoints this converter supports.
A : Optional[Any] = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
# fix: the conversion entry point below asserts against
# ``ACCEPTABLE_CHECKPOINTS``, which the mangled rename left undefined.
ACCEPTABLE_CHECKPOINTS = A
def _lowerCamelCase(checkpoint_path):
    """Load an original checkpoint's state dict onto the CPU.

    Fix: the mangled original assigned ``torch.load``'s result to
    ``__lowerCAmelCase`` but then ``return sd`` — a NameError.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


# The conversion entry point below calls this under the upstream name.
load_state_dict = _lowerCamelCase
def _lowerCamelCase(d, config, rename_keys_prefix=None):
    """Translate an original VisualBERT state dict *d* to HF key names.

    Detector weights are dropped, each remaining key is rewritten through the
    ``(old, new)`` pairs in *rename_keys_prefix*, and the decoder bias missing
    from the old BERT code is synthesized.

    Fixes: the mangled def repeated one parameter name (a SyntaxError) and
    collapsed ``new_d``/``new_key`` into throwaway assignments (NameError);
    the default also referenced an undefined module global, so it now falls
    back to the standard pair list when not supplied (backward compatible).
    """
    if rename_keys_prefix is None:
        rename_keys_prefix = [
            ("bert.bert", "visual_bert"),
            ("bert.cls", "cls"),
            ("bert.classifier", "cls"),
            ("token_type_embeddings_visual", "visual_token_type_embeddings"),
            ("position_embeddings_visual", "visual_position_embeddings"),
            ("projection", "visual_projection"),
        ]
    new_d = OrderedDict()
    # Position-id buffer expected by the HF VisualBERT implementation
    # (presumably — the mangled original discarded this tensor; verify).
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


# The conversion entry point below calls this under the upstream name.
get_new_dict = _lowerCamelCase
@torch.no_grad()
def _lowerCamelCase(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint to HF format.

    Picks the config and head class from the checkpoint file name, remaps the
    state dict, and saves the resulting model to *pytorch_dump_folder_path*.

    Fixes: the mangled def repeated one parameter name (a SyntaxError) and
    collapsed ``model_type``/``config_params``/``config``/``model`` into
    throwaway assignments while the branches below read those names.
    NOTE(review): ``load_state_dict``/``get_new_dict``/``ACCEPTABLE_CHECKPOINTS``
    must be bound by the definitions above (their defs were also mangled).
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"
    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


# The __main__ driver below calls this under the upstream name.
convert_visual_bert_checkpoint = _lowerCamelCase
if __name__ == "__main__":
    # fix: the parser and parsed-args results were mangled to ``A`` while the
    # lines below read ``parser``/``args`` (NameError); the bogus typing
    # annotations are dropped.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 1
|
"""simple docstring"""
import os
def _lowerCamelCase(file_path=None):
    """Project Euler 22: total of alphabetical-value * rank name scores.

    Reads a one-line file of quoted, comma-separated names, sorts them, and
    sums ``rank * sum(ord(letter) - 64)`` over all names.

    Fixes: the mangled original called ``os.path.dirname(_UpperCamelCase)``
    and ``ord(_UpperCamelCase)`` on an undefined name (should be ``__file__``
    and the loop's ``letter``).  Generalized with an optional *file_path*
    parameter (defaults to the sibling ``p022_names.txt``, preserving the
    original no-argument call).
    """
    if file_path is None:
        file_path = os.path.dirname(__file__) + "/p022_names.txt"
    with open(file_path) as file:
        names = str(file.readlines()[0])
        names = names.replace("\"", "").split(",")

    names.sort()

    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64  # 'A' scores 1
        total_score += (i + 1) * name_score
        name_score = 0  # reset for the next name
    return total_score


# The __main__ driver below calls this under the upstream name.
solution = _lowerCamelCase
if __name__ == "__main__":
    # ``solution`` is expected to be bound to the Euler-22 solver defined
    # above (its def was mangled to ``_lowerCamelCase`` — verify the binding).
    print(solution())
| 57
|
"""simple docstring"""
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Raised when a queue is asked to grow past its capacity.

    NOTE(review): the base name ``lowerCAmelCase__`` looks mangled
    (presumably ``Exception``); order of the two exception classes assumed
    to match the upstream ``OverFlowError``-then-``UnderFlowError`` layout —
    verify.
    """
    pass


# fix: the queue classes below raise ``OverFlowError``, which the mangled
# class rename left undefined.
OverFlowError = _UpperCamelCase
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Raised when an element is dequeued from an empty queue.

    NOTE(review): the base name ``lowerCAmelCase__`` looks mangled
    (presumably ``Exception``) — verify.
    """
    pass


# fix: the queue classes below raise ``UnderFlowError``, which the mangled
# class rename left undefined.
UnderFlowError = _UpperCamelCase
class _UpperCamelCase:
    """Priority queue with three fixed levels: 0 (highest) through 2.

    Each level is a FIFO list capped at 100 elements.

    Fixes to the mangled original: ``__init__`` never bound ``self.queues``
    (AttributeError on first use), the enqueue def repeated a parameter name
    (a SyntaxError), and both methods were renamed to ``snake_case`` so the
    second shadowed the first; the method names the demo code below calls
    (``enqueue``/``dequeue``) are restored.
    """

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority, data):
        """Append *data* at *priority* level; ValueError on a bad priority."""
        try:
            if len(self.queues[priority]) >= 100:
                raise OverflowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self):
        """Pop the oldest element from the highest non-empty priority level."""
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


# The demo below instantiates this class under the upstream name.
FixedPriorityQueue = _UpperCamelCase
class _UpperCamelCase:
    """Priority queue where the smallest stored element dequeues first.

    Backed by a single list capped at 100 elements; dequeue is O(n).

    Fixes to the mangled original: ``__init__`` never bound ``self.queue``
    (AttributeError on first use) and both methods were renamed to
    ``snake_case`` so the second shadowed the first; the method names the
    demo code below calls (``enqueue``/``dequeue``) are restored.
    """

    def __init__(self):
        self.queue = []

    def enqueue(self, data):
        """Append *data*; OverFlowError when 100 elements are stored."""
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self):
        """Remove and return the minimum element."""
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)


# The demo below instantiates this class under the upstream name.
ElementPriorityQueue = _UpperCamelCase
def _lowerCamelCase():
    """Demo driver for the fixed-priority queue.

    Fix: the mangled original assigned the queue to ``__lowerCAmelCase`` /
    printed ``_UpperCamelCase`` but read ``fpq`` — NameError; the local
    binding is restored.  Enqueue/dequeue counts preserved exactly
    (the final dequeue intentionally exercises the empty-queue error).
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


# The __main__ guard below calls this under the upstream name.
fixed_priority_queue = _lowerCamelCase
def _lowerCamelCase():
    """Demo driver for the element (min-value) priority queue.

    Fix: the mangled original assigned the queue to ``__lowerCAmelCase`` /
    printed ``_UpperCamelCase`` but read ``epq`` — NameError; the local
    binding is restored.  Enqueue/dequeue counts preserved exactly.
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


# The __main__ guard below calls this under the upstream name.
element_priority_queue = _lowerCamelCase
if __name__ == "__main__":
    # These names are expected to be bound to the two demo drivers defined
    # above (both defs were mangled to ``_lowerCamelCase`` — verify the
    # bindings).
    fixed_priority_queue()
    element_priority_queue()
| 57
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error.

    Fix: the mangled original declared ``*__a, **__a`` — a duplicated
    parameter name, which is a SyntaxError — restored to ``*args, **kwargs``.
    NOTE(review): the metaclass name ``lowerCAmelCase__`` (presumably
    ``DummyObject``) is mangled — verify.
    """

    __UpperCAmelCase : Optional[int] = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):  # shadows the previous def, as in the original
        requires_backends(cls, ["torch"])
# Ten torch-backend placeholder classes.  Uniform fix: each mangled def
# declared ``*__a, **__a`` — a duplicated parameter name, which is a
# SyntaxError — restored to ``*args, **kwargs``.  The repeated class name and
# the duplicated ``snake_case`` classmethods are kept exactly as in the
# original (each definition shadows the previous one).
# NOTE(review): the metaclass name ``lowerCAmelCase__`` (presumably
# ``DummyObject``) is mangled — verify.
class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : Tuple = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : Union[str, Any] = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : List[str] = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : Optional[Any] = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : Any = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : Dict = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : int = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : List[Any] = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : Optional[Any] = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _UpperCamelCase(metaclass=lowerCAmelCase__):
    """Torch-backend placeholder: any use raises a helpful backend error."""

    __UpperCAmelCase : int = ["""torch"""]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def snake_case(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# The original span held seven byte-identical module-level dummies, each
# mangled to the same name ``_lowerCamelCase`` with a duplicated vararg name
# (``*_UpperCamelCase, **_UpperCamelCase`` — a SyntaxError).  Every
# redefinition shadowed the previous one, so a single corrected definition is
# behaviorally equivalent at module level.
def _lowerCamelCase(*args, **kwargs):
    """Torch-only dummy function: raises if the torch backend is unavailable.

    NOTE(review): upstream dummies pass the function object itself to
    ``requires_backends`` so the error message can name it; the mangled
    argument made the original intent ambiguous — verify.
    """
    requires_backends(_lowerCamelCase, ["torch"])
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[Any] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : int =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[str] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Any =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Tuple =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : int =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""torch"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["torch"] )
# Auto-generated-style dummy placeholders: each class stands in for a
# torch-backed class and raises via `requires_backends` on construction or
# on any classmethod call.
# NOTE(review): obfuscation renamed every class to the same `_UpperCamelCase`,
# so each definition below shadows the previous one -- in the original file
# each class had a distinct public name.
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Optional[Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Dict =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Optional[Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Union[str, Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : str =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Optional[Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : str =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Union[str, Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Dict =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : int =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Any =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Tuple =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : List[str] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Union[str, Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Tuple =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Any =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : List[str] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : List[str] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Optional[Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Union[str, Any] =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
class _UpperCamelCase ( metaclass=lowerCAmelCase__ ):
    '''Dummy placeholder: any use raises because the "torch" backend is required.'''
    __UpperCAmelCase : Any =["""torch"""]
    def __init__( self , *__a , **__a ):
        requires_backends(self , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
    @classmethod
    def snake_case ( cls , *__a , **__a ):
        requires_backends(cls , ["torch"] )
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__lowerCAmelCase = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A : Optional[int] = logging.get_logger(__name__)
# Fairseq -> HF UniSpeech parameter-name mapping; "*" is a layer-index
# wildcard filled in while loading weights.
# NOTE(review): later code reads this dict as `MAPPING` and the list below
# as `TOP_LEVEL_KEYS`; obfuscation rebound both (and the logger) to `A`, so
# those names are undefined as written -- restore before running.
A : Dict = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the model's top level (no "unispeech." prefix).
A : int = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def _lowerCamelCase ( hf_pointer , key , value , full_name , weight_type , is_finetuned ):
    """Copy ``value`` into the parameter of ``hf_pointer`` addressed by the
    dotted path ``key`` (optionally one of its ``weight``/``weight_g``/
    ``weight_v``/``bias`` tensors), after checking shapes match.

    Fixes vs. the obfuscated original: the six parameters all shared one name
    (a SyntaxError), the ``getattr`` results were discarded so the pointer
    never advanced, and the final assignments never wrote into the model.
    """
    for attribute in key.split("." ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Map every fairseq checkpoint tensor onto the HF UniSpeech model, warning about unused ones.'''
    # NOTE(review): obfuscation collapsed the three parameters (originally
    # fairseq_model, hf_model, is_finetuned) and every local binding
    # (unused_weights, fairseq_dict, feature_extractor, is_used, mapped_key,
    # layer_index, weight_type, ...) onto single names, so this block does
    # not run as written -- restore the distinct names before use.
    __lowerCAmelCase = []
    __lowerCAmelCase = fairseq_model.state_dict()
    __lowerCAmelCase = hf_model.unispeech.feature_extractor
    # walk every tensor in the fairseq checkpoint
    for name, value in fairseq_dict.items():
        __lowerCAmelCase = False
        if "conv_layers" in name:
            # conv feature-extractor tensors use a dedicated loader
            load_conv_layer(
                _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
            __lowerCAmelCase = True
        else:
            for key, mapped_key in MAPPING.items():
                # top-level keys get no "unispeech." prefix
                __lowerCAmelCase = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    __lowerCAmelCase = True
                    if "*" in mapped_key:
                        # fill the layer-index wildcard from the fairseq name
                        __lowerCAmelCase = name.split(_UpperCamelCase )[0].split("." )[-2]
                        __lowerCAmelCase = mapped_key.replace("*" , _UpperCamelCase )
                    if "weight_g" in name:
                        __lowerCAmelCase = "weight_g"
                    elif "weight_v" in name:
                        __lowerCAmelCase = "weight_v"
                    elif "bias" in name:
                        __lowerCAmelCase = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __lowerCAmelCase = "weight"
                    else:
                        __lowerCAmelCase = None
                    set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
                continue
        if not is_used:
            unused_weights.append(_UpperCamelCase )
    logger.warning(f"Unused weights: {unused_weights}" )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
    '''Copy one fairseq conv / layer-norm tensor into the HF feature extractor.'''
    # NOTE(review): the five parameters (originally full_name, value,
    # feature_extractor, unused_weights, use_group_norm) and the local names
    # (items, layer_id, type_id) were collapsed by obfuscation, and the
    # `__lowerCAmelCase = value` lines originally assigned into
    # `feature_extractor.conv_layers[...].{conv,layer_norm}.{weight,bias}.data`
    # -- restore the distinct names before running.
    __lowerCAmelCase = full_name.split("conv_layers." )[-1]
    __lowerCAmelCase = name.split("." )
    __lowerCAmelCase = int(items[0] )
    __lowerCAmelCase = int(items[1] )
    # type_id 0: convolution weight/bias; type_id 2: normalization params
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            __lowerCAmelCase = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            __lowerCAmelCase = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # layer norms live on every layer, group norm only on layer 0
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            __lowerCAmelCase = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            __lowerCAmelCase = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=True ):
    '''Convert a fairseq UniSpeech checkpoint into the HF format and save it.'''
    # NOTE(review): obfuscation collapsed the parameters (originally
    # checkpoint_path, pytorch_dump_folder_path, config_path, dict_path,
    # is_finetuned) and all locals (config, target_dict, vocab_dict,
    # tokenizer, feature_extractor, processor, hf_unispeech, model, ...)
    # onto single names -- restore the distinct names before running.
    if config_path is not None:
        __lowerCAmelCase = UniSpeechConfig.from_pretrained(_UpperCamelCase )
    else:
        __lowerCAmelCase = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            __lowerCAmelCase = Dictionary.load_from_json(_UpperCamelCase )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __lowerCAmelCase = target_dict.pad_index
            __lowerCAmelCase = target_dict.bos_index
            __lowerCAmelCase = target_dict.eos_index
            __lowerCAmelCase = len(target_dict.symbols )
            __lowerCAmelCase = os.path.join(_UpperCamelCase , "vocab.json" )
            if not os.path.isdir(_UpperCamelCase ):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_UpperCamelCase ) )
                return
            os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
            __lowerCAmelCase = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __lowerCAmelCase = 42
            __lowerCAmelCase = 43
            with open(_UpperCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
                json.dump(_UpperCamelCase , _UpperCamelCase )
            __lowerCAmelCase = WavaVecaPhonemeCTCTokenizer(
                _UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_UpperCamelCase , )
            __lowerCAmelCase = True if config.feat_extract_norm == "layer" else False
            __lowerCAmelCase = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , )
            __lowerCAmelCase = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase )
            processor.save_pretrained(_UpperCamelCase )
        __lowerCAmelCase = UniSpeechForCTC(_UpperCamelCase )
    else:
        __lowerCAmelCase = UniSpeechForPreTraining(_UpperCamelCase )
    if is_finetuned:
        # fine-tuned checkpoints need the data dir / w2v_path overrides
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
    else:
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    __lowerCAmelCase = model[0].eval()
    recursively_load_weights(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    hf_unispeech.save_pretrained(_UpperCamelCase )
# CLI entry point: parse conversion arguments and run the checkpoint converter.
if __name__ == "__main__":
    A : Dict = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    # NOTE(review): obfuscation bound both the parser and the parsed args to
    # `A`, while the lines above/below read `parser` / `args`, and
    # `convert_unispeech_checkpoint` is the original (now renamed) converter.
    A : Optional[int] = parser.parse_args()
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 57
|
"""simple docstring"""
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 1
while len(_UpperCamelCase ) < 1e6:
constant.append(str(_UpperCamelCase ) )
i += 1
__lowerCAmelCase = "".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution())
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a ):
__lowerCAmelCase , __lowerCAmelCase = text, pattern
__lowerCAmelCase , __lowerCAmelCase = len(__a ), len(__a )
def snake_case ( self , __a ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def snake_case ( self , __a ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def snake_case ( self ):
# searches pattern in text and returns index positions
__lowerCAmelCase = []
for i in range(self.textLen - self.patLen + 1 ):
__lowerCAmelCase = self.mismatch_in_text(__a )
if mismatch_index == -1:
positions.append(__a )
else:
__lowerCAmelCase = self.match_in_pattern(self.text[mismatch_index] )
__lowerCAmelCase = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
A : List[str] = "ABAABA"
A : Union[str, Any] = "AB"
A : Optional[int] = BoyerMooreSearch(text, pattern)
A : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 57
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared fixtures for the digital-image-processing tests.
# NOTE(review): obfuscation bound both images to `A`, while the tests below
# read `img` and `gray_img` -- restore those names before running.
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
    '''Negative conversion produces at least one non-zero pixel.'''
    __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()
def _lowerCamelCase ( ):
    '''Contrast change returns a PIL RGB image of the expected size.'''
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
    '''Gaussian kernel generation yields all non-zero entries.'''
    __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def _lowerCamelCase ( ):
    '''Canny edge detection produces at least one edge pixel.'''
    __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __lowerCAmelCase = canny.canny(_UpperCamelCase )
    # assert canny array for at least one True
    assert canny_array.any()
def _lowerCamelCase ( ):
    '''Gaussian filtering leaves no zero pixels on this fixture.'''
    assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
    '''Convolution with a Laplacian-style kernel is non-trivial.'''
    # laplace diagonals
    __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
    assert res.any()
def _lowerCamelCase ( ):
    '''Median filtering produces a non-empty result.'''
    assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
    '''Sobel filtering yields both gradient magnitude and direction.'''
    __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
    assert grad.any() and theta.any()
def _lowerCamelCase ( ):
    '''Sepia conversion yields all non-zero pixels.'''
    __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
    assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    '''Burkes dithering produces a non-empty output image.'''
    __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''Nearest-neighbour resize produces a non-empty output.'''
    __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def _lowerCamelCase ( ):
    '''Local binary pattern: neighbor lookup and full LBP image are non-trivial.'''
    __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    __lowerCAmelCase = imread(_UpperCamelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = image[x_coordinate][y_coordinate]
    __lowerCAmelCase = lbp.get_neighbors_pixel(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert lbp_image.any()
| 57
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Output type returned by the Karras-Ve scheduler step methods.'''
    # NOTE(review): the three field names were clobbered by obfuscation; the
    # keyword construction below (KarrasVeOutput(prev_sample=..., derivative=...,
    # pred_original_sample=...)) shows they were prev_sample, derivative and
    # pred_original_sample -- confirm against diffusers.
    __UpperCAmelCase : torch.FloatTensor
    __UpperCAmelCase : torch.FloatTensor
    __UpperCAmelCase : Optional[torch.FloatTensor] =None
class _UpperCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ):
    '''Stochastic sampling scheduler in the style of Karras et al. (Algorithm 2).'''
    # NOTE(review): many `__lowerCAmelCase = ...` lines below were originally
    # distinct attribute/local assignments (e.g. self.init_noise_sigma,
    # self.num_inference_steps, self.timesteps, self.schedule, gamma, eps,
    # sigma_hat, pred_original_sample, derivative, sample_prev) that were
    # clobbered onto one name by obfuscation -- restore before running.
    __UpperCAmelCase : Tuple =2
    @register_to_config
    def __init__( self , __a = 0.0_2 , __a = 1_00 , __a = 1.0_0_7 , __a = 80 , __a = 0.0_5 , __a = 50 , ):
        # standard deviation of the initial noise distribution
        __lowerCAmelCase = sigma_max
        # setable values
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None  # sigma(t_i)
    def snake_case ( self , __a , __a = None ):
        # no input scaling is required for this scheduler
        return sample
    def snake_case ( self , __a , __a = None ):
        '''Precompute the timestep and sigma schedule for inference.'''
        __lowerCAmelCase = num_inference_steps
        __lowerCAmelCase = np.arange(0 , self.num_inference_steps )[::-1].copy()
        __lowerCAmelCase = torch.from_numpy(__a ).to(__a )
        # geometric interpolation between sigma_max and sigma_min
        __lowerCAmelCase = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        __lowerCAmelCase = torch.tensor(__a , dtype=torch.floataa , device=__a )
    def snake_case ( self , __a , __a , __a = None ):
        '''Increase noise level: returns (sample_hat, sigma_hat).'''
        # gamma > 0 only inside the configured [s_min, s_max] band
        if self.config.s_min <= sigma <= self.config.s_max:
            __lowerCAmelCase = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            __lowerCAmelCase = 0
        # sample eps ~ N(0, S_noise^2 * I)
        __lowerCAmelCase = self.config.s_noise * randn_tensor(sample.shape , generator=__a ).to(sample.device )
        __lowerCAmelCase = sigma + gamma * sigma
        __lowerCAmelCase = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def snake_case ( self , __a , __a , __a , __a , __a = True , ):
        '''First-order (Euler) step of Algorithm 2.'''
        __lowerCAmelCase = sample_hat + sigma_hat * model_output
        __lowerCAmelCase = (sample_hat - pred_original_sample) / sigma_hat
        __lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__a , derivative=__a , pred_original_sample=__a )
    def snake_case ( self , __a , __a , __a , __a , __a , __a , __a = True , ):
        '''Second-order correction step of Algorithm 2.'''
        __lowerCAmelCase = sample_prev + sigma_prev * model_output
        __lowerCAmelCase = (sample_prev - pred_original_sample) / sigma_prev
        # average of the Euler derivative and the corrected derivative
        __lowerCAmelCase = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=__a , derivative=__a , pred_original_sample=__a )
    def snake_case ( self , __a , __a , __a ):
        # adding noise for training is not supported by this scheduler
        raise NotImplementedError()
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy import structure for the AltCLIP model package.
# Fixes vs. the obfuscated original: the structure dict and the modeling
# list were bound to throwaway names while `_LazyModule` received an
# undefined `_import_structure`, and the lazy module was never installed
# into `sys.modules`.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}
# Register the torch-only modeling module lazily, only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy loader.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a ):
__lowerCAmelCase = str(id_ )
__lowerCAmelCase = None
__lowerCAmelCase = None
__lowerCAmelCase = []
__lowerCAmelCase = {} # {vertex:distance}
def __lt__( self , __a ):
return self.key < other.key
def __repr__( self ):
return self.id
def snake_case ( self , __a ):
self.neighbors.append(__a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = weight
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _UpperCamelCase )
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = graph[:]
while q:
__lowerCAmelCase = min(_UpperCamelCase )
q.remove(_UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
for i in range(1 , len(_UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
for u in graph:
__lowerCAmelCase = math.inf
__lowerCAmelCase = None
__lowerCAmelCase = 0
__lowerCAmelCase = list(_UpperCamelCase )
hq.heapify(_UpperCamelCase )
while h:
__lowerCAmelCase = hq.heappop(_UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__lowerCAmelCase = u
__lowerCAmelCase = u.edges[v.id]
hq.heapify(_UpperCamelCase )
for i in range(1 , len(_UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def _lowerCamelCase ( ):
    '''Placeholder test hook; the original doctest examples were stripped by obfuscation.'''
# Run the module doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 57
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
A : Dict = logging.getLogger(__name__)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Training arguments extending TrainingArguments with seq2seq knobs: label smoothing, sortish sampling, generation-based eval, Adafactor, dropout overrides and LR-scheduler choice.'''
    # NOTE(review): every field was renamed to the same `__UpperCAmelCase` by
    # obfuscation (originally, per the metadata: label_smoothing,
    # sortish_sampler, predict_with_generate, adafactor, encoder_layerdrop,
    # decoder_layerdrop, dropout, attention_dropout, lr_scheduler) -- as
    # written, later fields shadow earlier ones; restore before use.
    __UpperCAmelCase : Optional[float] =field(
        default=0.0 ,metadata={"""help""": """The label smoothing epsilon to apply (if not zero)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """Whether to SortishSamler or not."""} )
    __UpperCAmelCase : bool =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
    __UpperCAmelCase : bool =field(default=lowerCAmelCase__ ,metadata={"""help""": """whether to use adafactor"""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Encoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Decoder layer dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(default=lowerCAmelCase__ ,metadata={"""help""": """Dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[float] =field(
        default=lowerCAmelCase__ ,metadata={"""help""": """Attention dropout probability. Goes into model.config."""} )
    __UpperCAmelCase : Optional[str] =field(
        default="""linear""" ,metadata={"""help""": F'''Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'''} ,)
| 57
| 1
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCamelCase :
'''simple docstring'''
    # NOTE(review): the duplicate `__a` parameters are a SyntaxError and the
    # assignments below were originally `self.<name> = <name>` (self.parent,
    # self.batch_size, self.seq_length, ...) before obfuscation collapsed
    # them -- restore before running.
    def __init__( self , __a , __a=2 , __a=8 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=16 , __a=5 , __a=2 , __a=36 , __a="gelu" , __a=0.0 , __a=0.0 , __a=5_12 , __a=16 , __a=2 , __a=0.0_2 , __a=3 , __a=4 , __a=None , ):
        '''Store the tester's model/test hyper-parameters.'''
        __lowerCAmelCase = parent
        __lowerCAmelCase = batch_size
        __lowerCAmelCase = seq_length
        __lowerCAmelCase = is_training
        __lowerCAmelCase = use_input_mask
        __lowerCAmelCase = use_token_type_ids
        __lowerCAmelCase = use_labels
        __lowerCAmelCase = vocab_size
        __lowerCAmelCase = hidden_size
        __lowerCAmelCase = num_hidden_layers
        __lowerCAmelCase = num_attention_heads
        __lowerCAmelCase = intermediate_size
        __lowerCAmelCase = hidden_act
        __lowerCAmelCase = hidden_dropout_prob
        __lowerCAmelCase = attention_probs_dropout_prob
        __lowerCAmelCase = max_position_embeddings
        __lowerCAmelCase = type_vocab_size
        __lowerCAmelCase = type_sequence_label_size
        __lowerCAmelCase = initializer_range
        __lowerCAmelCase = num_labels
        __lowerCAmelCase = num_choices
        __lowerCAmelCase = scope
    def snake_case ( self ):
        '''Build a config plus random input tensors (ids, masks, labels) for model tests.'''
        # NOTE(review): the bindings below were clobbered by obfuscation;
        # from the return statement they were input_ids, input_mask,
        # token_type_ids, sequence_labels, token_labels, choice_labels,
        # config -- restore before running.
        __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowerCAmelCase = None
        if self.use_input_mask:
            __lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
        __lowerCAmelCase = None
        if self.use_token_type_ids:
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        __lowerCAmelCase = None
        if self.use_labels:
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
        __lowerCAmelCase = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def snake_case ( self ):
        '''Return an MraConfig built from the tester's stored hyper-parameters.'''
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
def get_pipeline_config(self):
    """Return a config with a small vocab (300) for pipeline tests.

    The obfuscated version assigned both the config and the vocab override to
    the same local and then returned the undefined name `config`; restore the
    attribute write. (Method name follows the standard tester convention —
    no caller is visible in this chunk.)
    """
    config = self.get_config()
    config.vocab_size = 300
    return config
def snake_case ( self ):
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) = self.prepare_config_and_inputs()
__lowerCAmelCase = True
__lowerCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Run MraModel with/without masks and check the hidden-state shape.

    The obfuscated signature repeated `__a` for all seven parameters, which is
    a SyntaxError in Python; restore distinct names matching the tuple returned
    by prepare_config_and_inputs.
    """
    model = MraModel(config=config)
    model.to(torch_device)
    model.eval()
    # Exercise the three supported calling conventions.
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    result = model(input_ids, token_type_ids=token_type_ids)
    result = model(input_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
    encoder_hidden_states,
    encoder_attention_mask,
):
    """Run MraModel as a decoder with cross-attention and check output shape.

    Restores the duplicate-parameter signature (SyntaxError) and the mangled
    `config.add_cross_attention = True` write.
    """
    config.add_cross_attention = True
    model = MraModel(config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
    )
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        encoder_hidden_states=encoder_hidden_states,
    )
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check MraForMaskedLM logits shape (batch, seq, vocab)."""
    model = MraForMaskedLM(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check MraForQuestionAnswering start/end logits shapes."""
    model = MraForQuestionAnswering(config=config)
    model.to(torch_device)
    model.eval()
    result = model(
        input_ids,
        attention_mask=input_mask,
        token_type_ids=token_type_ids,
        start_positions=sequence_labels,
        end_positions=sequence_labels,
    )
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check MraForSequenceClassification logits shape (batch, num_labels)."""
    config.num_labels = self.num_labels
    model = MraForSequenceClassification(config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check MraForTokenClassification logits shape (batch, seq, num_labels)."""
    config.num_labels = self.num_labels
    model = MraForTokenClassification(config=config)
    model.to(torch_device)
    model.eval()
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
    """Check MraForMultipleChoice logits shape (batch, num_choices).

    Inputs are tiled along a new choice dimension before the forward pass.
    """
    config.num_choices = self.num_choices
    model = MraForMultipleChoice(config=config)
    model.to(torch_device)
    model.eval()
    multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
    result = model(
        multiple_choice_inputs_ids,
        attention_mask=multiple_choice_input_mask,
        token_type_ids=multiple_choice_token_type_ids,
        labels=choice_labels,
    )
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) in the format the common test mixin expects.

    Restores the mangled tuple unpacking and the undefined `config`/`inputs_dict`
    names at the return statement.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = config_and_inputs
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class _UpperCamelCase ( lowerCAmelCase__ ,unittest.TestCase ):
    """Model-level tests for MRA, driven by the shared model-tester harness.

    Restores the obfuscated class: all class attributes were bound to a single
    mangled name (so only the last survived) and all test methods were named
    `snake_case` (so only the last was registered with unittest). Attribute
    names follow the standard ModelTesterMixin contract — NOTE(review):
    confirm the four boolean flags against the original file.
    """

    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False  # MRA does not output attentions
    all_generative_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions" )
    def test_attention_outputs(self):
        return
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
    """Slow integration tests against pinned outputs of pretrained MRA checkpoints.

    Restores mangled local names (`model`, `input_ids`, `output`, ...) and gives
    the three methods distinct names — in the obfuscated version all three were
    named `snake_case`, so only the last was ever run.
    """

    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 57
|
"""simple docstring"""
import argparse
import os
import re
import packaging.version
A : Any = "examples/"
A : Optional[Any] = {
"examples": (re.compile(R"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), R"\1version=\"VERSION\","),
"doc": (re.compile(R"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
A : Optional[int] = {
"init": "src/transformers/__init__.py",
"setup": "setup.py",
}
A : List[Any] = "README.md"
def update_version_in_file(fname, version, pattern):
    """Rewrite the version in `fname` using the regex registered under `pattern`.

    Fixes the obfuscated signature, which repeated the same parameter name
    three times (a SyntaxError), and restores the name used by the call sites.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update `check_min_version` calls in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects" )
        if "legacy" in directories:
            directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py" ):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is hard-coded; skip examples for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with stable links in the README model list."""
    # If the introduction or conclusion of the list changes, these prompts must too.
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1." ):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read and parse the current version from the transformers __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the release version, confirm it interactively, and apply it everywhere."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]" )
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}." )
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`." )
        clean_main_ref_in_model_list()
def post_release_work():
    """After a release, bump to the next dev version (interactive confirmation)."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]" )
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}." )
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`." )
    clean_main_ref_in_model_list()
if __name__ == "__main__":
A : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
A : Dict = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 57
| 1
|
"""simple docstring"""
def average_absolute_deviation(nums):
    """Return the mean absolute deviation of a list of numbers.

    Raises ValueError on an empty list. Fixes the obfuscated version, whose
    parameter name never bound `nums`, making the body raise NameError.
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty" )
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A : Tuple = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Dict = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
"""simple docstring"""
def remove_duplicates(key):
    """Return `key` with repeated letters removed (first occurrence kept).

    Spaces are always kept; non-alphabetic characters are dropped. Fixes the
    obfuscated version, whose parameter never bound `key` (NameError), and
    restores the name used by create_cipher_map.
    """
    key_no_dups = ""
    for ch in key:
        # Note: `and` binds tighter than `or`, so spaces are always appended.
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map(key):
    """Build a keyword-cipher map from plaintext letters A-Z to cipher letters.

    The deduplicated key fills the first slots; the rest of the alphabet is
    shifted in, skipping letters already used by the key.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message, cipher_map):
    """Encode `message` (upper-cased) through `cipher_map`; unknown chars pass through.

    Fixes the obfuscated signature which repeated one parameter name (SyntaxError).
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())
def decipher(message, cipher_map):
    """Decode `message` by inverting `cipher_map`; unknown chars pass through."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main():
    """Interactive driver: prompt for a message, keyword, and mode, then print the result."""
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 57
|
"""simple docstring"""
def solution(n=600851475143):
    """Return the largest prime factor of `n` (Project Euler #3).

    Restores the mangled local names (the obfuscated body used `n` and `i`
    which were never bound) and the function name `solution` used at the
    module's entry point.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Advance i to the next divisor of n; since smaller factors are
        # divided out first, each such i is prime.
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f'''{solution() = }''')
| 57
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for transformers-cli: register all subcommands and dispatch.

    Restores mangled names: the parser/args/service locals were all bound to
    one throwaway name, and the register calls referenced an undefined name
    instead of the subparsers object.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 57
|
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Shared harness exercising a config class: property access, JSON round
    trips, save/load (incl. subfolder), num_labels handling, and kwarg init.

    Restores the obfuscated class: `__init__` never stored its arguments on
    `self`, and all methods were named `snake_case` while `run_common_tests`
    (called by the test suites) invokes them by their real names.
    NOTE(review): the base class name is mangled in SOURCE and left as-is.
    """

    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )
        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"] )
        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx} )
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}" )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json" )
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_configpath = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_configpath)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)
        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        # NOTE(review): SOURCE spells these attributes `idalabel`/`labelaid`
        # (mangled id2label/label2id); restored to the standard names.
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)
        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    # NOTE(review): SOURCE has the mangled `torch.floataa`;
                    # restored to float16 — confirm against config_common_kwargs.
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))
        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
| 57
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
# Module-level constants for the Whisper config. In the obfuscated version the
# logger, archive map, and both token lists were all bound to the single name
# `A`, shadowing each other; restore descriptive names.
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}

# fmt: off
# Token ids that correspond to non-speech sounds; suppressed during generation.
NON_SPEECH_TOKENS = [
    1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
    2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
    6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
    7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
    1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
    4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
    1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
    1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
    3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
    1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
    2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
    6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
    8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
    3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
    7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
    1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
    2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
    4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Configuration class for Whisper encoder-decoder speech models.

    Restores the obfuscated class: the `__init__` signature repeated `__a`
    for every parameter (a SyntaxError), the instance assignments never used
    `self.`, and the three class attributes were bound to one mangled name.
    Parameter names/order follow the standard WhisperConfig signature —
    NOTE(review): confirm against the upstream definition.
    """

    model_type = """whisper"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=5_18_65,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=15_36,
        encoder_ffn_dim=15_36,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=5_02_57,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=2_56,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        scale_embedding=False,
        max_source_positions=15_00,
        max_target_positions=4_48,
        pad_token_id=5_02_56,
        bos_token_id=5_02_56,
        eos_token_id=5_02_56,
        suppress_tokens=None,
        begin_suppress_tokens=[2_20, 5_02_56],
        use_weighted_layer_sum=False,
        classifier_proj_size=2_56,
        apply_spec_augment=False,
        mask_time_prob=0.0_5,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class _UpperCamelCase ( lowerCAmelCase__ ):
    """ONNX export configuration for Whisper (seq2seq, optional past key values).

    Restores the obfuscated class: its three members were all named
    `snake_case` (so only the last survived) and the dict mutations in the
    inputs property were mangled into plain assignments.
    """

    @property
    def inputs(self):
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ] )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs" )
        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        is_pair=False,
        framework=None,
        sampling_rate=2_20_50,
        time_duration=5.0,
        frequency=2_20,
    ):
        dummy_inputs = OrderedDict()
        # Audio side: delegate to the generic OnnxConfig dummy generator.
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        # With past key values, the decoder only sees half the encoder sequence.
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features" )
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids" )
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values" )
        return dummy_inputs

    @property
    def atol_for_validation(self):
        # Looser tolerance than the default, appropriate for Whisper export.
        return 1e-3
| 57
|
"""simple docstring"""
A : int = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
| 57
| 1
|
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
# Official OpenAI Whisper checkpoint URLs, keyed by model name.  The
# second-to-last path component of each URL is the file's SHA256 checksum,
# which the download helper below verifies.
# NOTE(review): the download code refers to this table as `_MODELS`; here it is
# bound to `A` -- confirm the intended global name.
A : Dict = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ["layers", "blocks"]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
# Substring rewrite rules: OpenAI Whisper state-dict key fragment -> the
# equivalent HF Transformers key fragment.  Applied by the rename helper below.
# NOTE(review): the rename helper refers to this table as `WHISPER_MAPPING`;
# here it is bound to `A` -- confirm the intended global name.
A : Any = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}
def _lowerCamelCase ( _UpperCamelCase ):
    """Rename OpenAI Whisper state-dict keys to the HF layout, in place.

    Each key is rewritten by applying every matching substring substitution
    from WHISPER_MAPPING; the renamed dict is also returned for convenience.

    Fix: the original body read unbound names (``s_dict``/``key``/``new_key``)
    because the parameter and all locals had been renamed away.

    NOTE(review): WHISPER_MAPPING is expected as a module global, but the
    mapping table in this file is bound to `A` -- verify before running.
    """
    keys = list(_UpperCamelCase.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        _UpperCamelCase[new_key] = _UpperCamelCase.pop(key)
    return _UpperCamelCase
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = emb.weight.shape
__lowerCAmelCase = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
__lowerCAmelCase = emb.weight.data
return lin_layer
def _lowerCamelCase ( url , root ):
    """Download a Whisper checkpoint from ``url`` into directory ``root``.

    The expected SHA256 checksum is the second-to-last path component of the
    URL (see the model-URL table above).  A previously downloaded file with a
    matching checksum is reused.

    Args:
        url: checkpoint URL (checksum embedded in the path).
        root: destination directory (created if missing).

    Returns:
        bytes: the raw checkpoint contents.

    Raises:
        RuntimeError: if the target path is a directory, or the downloaded
            file's checksum does not match.

    Fixes vs. the original: duplicate parameter names (a SyntaxError),
    ``hashlib.shaaaa`` (no such attribute; should be ``sha256``),
    collapsed locals, and ``unit_scale`` not set to True for tqdm.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        with open(download_target, "rb") as cached:
            model_bytes = cached.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    with open(download_target, "rb") as downloaded:
        model_bytes = downloaded.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def _lowerCamelCase ( checkpoint_path , pytorch_dump_folder_path ):
    """Convert an OpenAI Whisper checkpoint to HF WhisperForConditionalGeneration.

    Args:
        checkpoint_path: either a local ``.pt`` file, or a model name
            (e.g. ``"tiny.en"``) to download from the official URL table.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.

    Fixes vs. the original: duplicate parameter names (a SyntaxError) and
    locals collapsed into one name, which destroyed ``dimensions`` /
    ``state_dict`` / ``config``; ``decoder_attention_heads`` was read from
    ``n_text_state`` (the hidden size) instead of the head count.

    NOTE(review): relies on the sibling helpers ``_download``, ``_MODELS``,
    ``remove_ignore_keys_``, ``rename_keys`` and ``make_linear_from_emb``,
    whose definitions in this file were renamed by the obfuscation -- confirm
    they resolve at runtime.
    """
    if ".pt" not in checkpoint_path:
        # A model name such as "tiny.en": fetch the official checkpoint.
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep the token-embedding weights around: they become the output
    # projection when embeddings are not tied via make_linear_from_emb.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # Fix: head count comes from "n_text_head"; "n_text_state" is d_model.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    # strict=False: the sinusoidal position tables are allowed to be absent.
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert an OpenAI Whisper checkpoint to HF format.
    A : List[Any] = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    # NOTE(review): the parser and parsed-args objects are both assigned to
    # `A`, yet the code below reads `parser` and `args` -- those names look
    # unbound here; confirm the automated renaming before running this script.
    A : Tuple = parser.parse_args()
    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 57
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Byte-level (ByT5-style) tokenizer: every UTF-8 byte is a token.

    Vocabulary layout: 3 special tokens (pad=0, eos=1, unk=2), then the 256
    byte values, then ``extra_ids`` sentinel tokens at the end.

    NOTE(review): this class was mangled by an automated rewrite -- the
    ``__init__`` signature repeats ``__a`` (duplicate parameter names are a
    SyntaxError) while bodies read the intended names (``extra_ids``,
    ``additional_special_tokens``, ...), and every method is named
    ``snake_case`` so later defs shadow earlier ones.  The logic mirrors
    ``transformers.ByT5Tokenizer``; confirm against that reference before use.
    """

    __UpperCAmelCase : str =["""input_ids""", """attention_mask"""]

    def __init__( self , __a="</s>" , __a="<unk>" , __a="<pad>" , __a=1_25 , __a=None , **__a , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCAmelCase = [f"<extra_id_{i}>" for i in range(__a )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCAmelCase = len(set(filter(lambda __a : bool("extra_id" in str(__a ) ) , __a ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens" )
        # Wrap bare-string special tokens as AddedToken (no lstrip/rstrip).
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        __lowerCAmelCase = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        super().__init__(
            eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , )
        __lowerCAmelCase = extra_ids
        __lowerCAmelCase = 2**8  # utf is 8 bits
        # define special tokens dict
        __lowerCAmelCase = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCAmelCase = len(self.special_tokens_encoder )
        __lowerCAmelCase = len(__a )
        # Extra (sentinel) ids occupy the tail of the vocabulary.
        for i, token in enumerate(__a ):
            __lowerCAmelCase = self.vocab_size + i - n
        # Reverse map: id -> special token string.
        __lowerCAmelCase = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def snake_case ( self ):
        # vocab_size: 256 byte tokens + specials + sentinel ids.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def snake_case ( self , __a , __a = None , __a = False ):
        # get_special_tokens_mask: 1 marks special positions (the eos after
        # each sequence); 0 marks regular tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(__a )) + [1]
        return ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]

    def snake_case ( self , __a ):
        # Append eos unless the sequence already ends with it (warn if so).
        if len(__a ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def snake_case ( self , __a , __a = None ):
        # create_token_type_ids_from_sequences: ByT5 uses all-zero type ids.
        __lowerCAmelCase = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_a + eos ) * [0]

    def snake_case ( self , __a , __a = None ):
        # build_inputs_with_special_tokens: X</s> or X</s>Y</s>.
        __lowerCAmelCase = self._add_eos_if_not_present(__a )
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCAmelCase = self._add_eos_if_not_present(__a )
            return token_ids_a + token_ids_a

    def snake_case ( self , __a ):
        # _tokenize: one token per UTF-8 byte.
        __lowerCAmelCase = [chr(__a ) for i in text.encode("utf-8" )]
        return tokens

    def snake_case ( self , __a ):
        # _convert_token_to_id: specials first, then added tokens, then bytes
        # (offset by the number of special tokens); multi-char tokens -> unk.
        if token in self.special_tokens_encoder:
            __lowerCAmelCase = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCAmelCase = self.added_tokens_encoder[token]
        elif len(__a ) != 1:
            __lowerCAmelCase = self.unk_token_id
        else:
            __lowerCAmelCase = ord(__a ) + self._num_special_tokens
        return token_id

    def snake_case ( self , __a ):
        # _convert_id_to_token: inverse of the mapping above.
        if index in self.special_tokens_decoder:
            __lowerCAmelCase = self.special_tokens_decoder[index]
        else:
            __lowerCAmelCase = chr(index - self._num_special_tokens )
        return token

    def snake_case ( self , __a ):
        # convert_tokens_to_string: re-assemble bytes, decode UTF-8 leniently.
        __lowerCAmelCase = B""
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.added_tokens_decoder:
                __lowerCAmelCase = self.special_tokens_decoder[token].encode("utf-8" )
            elif token in self.special_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            elif token in self.added_tokens_encoder:
                __lowerCAmelCase = token.encode("utf-8" )
            else:
                __lowerCAmelCase = bytes([ord(__a )] )
            bstring += tok_string
        __lowerCAmelCase = bstring.decode("utf-8" , errors="ignore" )
        return string

    def snake_case ( self , __a , __a = None ):
        # save_vocabulary: byte vocabulary has no file to write.
        return ()
| 57
| 1
|
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys

# SHA of the commit where the current branch forked from `main`.
A : Tuple = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
# NOTE(review): every result is assigned to `A`, yet later lines read
# `fork_point_sha`, `modified_files`, `joined_dirs`, `regex` and
# `relevant_modified_files` -- those names look unbound here; confirm the
# automated renaming before running this script.
A : str = (
    subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("utf-8").split()
)
A : Any = "|".join(sys.argv[1:])
A : Any = re.compile(Rf'''^({joined_dirs}).*?\.py$''')
A : Dict = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 57
|
"""simple docstring"""
import numpy
# List of input, output pairs
# List of input, output pairs
# Each entry is ((feature_1, feature_2, feature_3), target).
A : Any = (
    ((5, 2, 3), 1_5),
    ((6, 5, 9), 2_5),
    ((1_1, 1_2, 1_3), 4_1),
    ((1, 1, 1), 8),
    ((1_1, 1_2, 1_3), 4_1),
)
# Held-out examples used by the test routine below.
A : str = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0))
# Model parameters: bias followed by one weight per feature.
A : Union[str, Any] = [2, 4, 1, 5]
# NOTE(review): the functions below read `train_data`, `test_data`,
# `parameter_vector`, `m` and `LEARNING_RATE`, but all globals here are bound
# to `A` -- verify the intended names before running.
A : int = len(train_data)
A : Dict = 0.009
def _lowerCamelCase ( example_no , data_set="train" ):
    """Signed prediction error (hypothesis - actual) for one example.

    Fix: the original declared the parameter name twice (a SyntaxError) and
    passed the same argument for both example index and data-set selector.
    """
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)
def _lowerCamelCase ( _UpperCamelCase ):
    """Linear hypothesis value: bias + dot(weights, features).

    Args:
        _UpperCamelCase: the example's feature tuple.

    Fix: the original body read the unbound name ``data_input_tuple`` instead
    of its parameter, raising NameError.
    """
    hyp_val = 0
    for i in range(len(_UpperCamelCase) - 1):
        # parameter_vector[0] is the bias; weights start at index 1.
        hyp_val += _UpperCamelCase[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def _lowerCamelCase ( example_no , data_set ):
    """Actual target value for an example from the train or test set.

    Returns None for an unknown ``data_set`` label.

    Fix: the original declared the parameter name twice (a SyntaxError).
    """
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def _lowerCamelCase ( example_no , data_set ):
    """Hypothesis value for an example drawn from the train or test set.

    Returns None for an unknown ``data_set`` label.

    Fix: the original declared the parameter name twice (a SyntaxError).
    """
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def _lowerCamelCase ( index , end=m ):
    """Sum of per-example error terms for the cost derivative.

    Args:
        index: feature index to weight the error by; -1 selects the bias term.
        end: number of training examples to sum over (defaults to m).

    Fix: the original declared the parameter name twice (a SyntaxError), so
    the loop bound and the feature index were indistinguishable.
    """
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value
def _lowerCamelCase ( _UpperCamelCase ):
    """Cost derivative for one parameter: mean of the summed error terms.

    Fix: the original passed the index for both arguments of
    summation_of_cost_derivative; the second must be the example count m.
    """
    cost_derivative_value = summation_of_cost_derivative(_UpperCamelCase, m) / m
    return cost_derivative_value
def _lowerCamelCase ( ):
    """Run batch gradient descent until the parameter vector converges.

    Convergence is reached when consecutive parameter vectors agree within
    the absolute tolerance below; the iteration count is printed.

    Fix: the original collapsed all locals into one name, so the candidate
    vector was never filled element-wise and the update never happened.
    """
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.00_00_02
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            # index -1 (i.e. i - 1 for i == 0) addresses the bias term.
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))
def _lowerCamelCase ( ):
    """Print actual vs. predicted values for every test-set example.

    Fix: the original called ``len(_UpperCamelCase)`` although the function
    takes no parameters; the loop must range over the test data.
    """
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
    # Fit the linear hypothesis on the training data, then evaluate it.
    # NOTE(review): both routines are defined above under the mangled name
    # `_lowerCamelCase`; confirm `run_gradient_descent`/`test_gradient_descent`
    # resolve at runtime.
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
| 57
| 1
|
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Test-suite for ``DEISMultistepScheduler`` (SchedulerCommonTest subclass).

    Covers config round-tripping through save/load, step-output equivalence
    after reload, scheduler interchange via ``from_config``, and fp16 support.

    NOTE(review): this class was mangled by an automated rewrite -- locals are
    all named ``__lowerCAmelCase`` while bodies read the intended names
    (``config``, ``scheduler``, ``sample``, ...), and every test method is
    named ``snake_case`` so later defs shadow earlier ones.  Compare with the
    upstream ``DEISMultistepSchedulerTest`` before relying on it.
    """

    __UpperCAmelCase : Any =(DEISMultistepScheduler,)
    __UpperCAmelCase : Optional[int] =(("""num_inference_steps""", 2_5),)

    def snake_case ( self , **__a ):
        # Base scheduler config; keyword overrides are merged on top.
        __lowerCAmelCase = {
            "num_train_timesteps": 10_00,
            "beta_start": 0.0_0_0_1,
            "beta_end": 0.0_2,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**__a )
        return config

    def snake_case ( self , __a=0 , **__a ):
        # check_over_configs: outputs must match after a save/load round trip.
        __lowerCAmelCase = dict(self.forward_default_kwargs )
        __lowerCAmelCase = kwargs.pop("num_inference_steps" , __a )
        __lowerCAmelCase = self.dummy_sample
        __lowerCAmelCase = 0.1 * sample
        __lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            __lowerCAmelCase = self.get_scheduler_config(**__a )
            __lowerCAmelCase = scheduler_class(**__a )
            scheduler.set_timesteps(__a )
            # copy over dummy past residuals
            __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__a )
                __lowerCAmelCase = scheduler_class.from_pretrained(__a )
                new_scheduler.set_timesteps(__a )
                # copy over dummy past residuals
                __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            __lowerCAmelCase , __lowerCAmelCase = sample, sample
            for t in range(__a , time_step + scheduler.config.solver_order + 1 ):
                __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
                __lowerCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
                assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def snake_case ( self ):
        # Intentionally disabled common test.
        pass

    def snake_case ( self , __a=0 , **__a ):
        # check_over_forward: like above but with the default config.
        __lowerCAmelCase = dict(self.forward_default_kwargs )
        __lowerCAmelCase = kwargs.pop("num_inference_steps" , __a )
        __lowerCAmelCase = self.dummy_sample
        __lowerCAmelCase = 0.1 * sample
        __lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
        for scheduler_class in self.scheduler_classes:
            __lowerCAmelCase = self.get_scheduler_config()
            __lowerCAmelCase = scheduler_class(**__a )
            scheduler.set_timesteps(__a )
            # copy over dummy past residuals (must be after setting timesteps)
            __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(__a )
                __lowerCAmelCase = scheduler_class.from_pretrained(__a )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(__a )
                # copy over dummy past residual (must be after setting timesteps)
                __lowerCAmelCase = dummy_past_residuals[: new_scheduler.config.solver_order]
            __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
            __lowerCAmelCase = new_scheduler.step(__a , __a , __a , **__a ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def snake_case ( self , __a=None , **__a ):
        # full_loop: run a complete 10-step denoising loop and return the sample.
        if scheduler is None:
            __lowerCAmelCase = self.scheduler_classes[0]
            __lowerCAmelCase = self.get_scheduler_config(**__a )
            __lowerCAmelCase = scheduler_class(**__a )
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config(**__a )
        __lowerCAmelCase = scheduler_class(**__a )
        __lowerCAmelCase = 10
        __lowerCAmelCase = self.dummy_model()
        __lowerCAmelCase = self.dummy_sample_deter
        scheduler.set_timesteps(__a )
        for i, t in enumerate(scheduler.timesteps ):
            __lowerCAmelCase = model(__a , __a )
            __lowerCAmelCase = scheduler.step(__a , __a , __a ).prev_sample
        return sample

    def snake_case ( self ):
        # Two consecutive steps must preserve the sample's shape.
        __lowerCAmelCase = dict(self.forward_default_kwargs )
        __lowerCAmelCase = kwargs.pop("num_inference_steps" , __a )
        for scheduler_class in self.scheduler_classes:
            __lowerCAmelCase = self.get_scheduler_config()
            __lowerCAmelCase = scheduler_class(**__a )
            __lowerCAmelCase = self.dummy_sample
            __lowerCAmelCase = 0.1 * sample
            if num_inference_steps is not None and hasattr(__a , "set_timesteps" ):
                scheduler.set_timesteps(__a )
            elif num_inference_steps is not None and not hasattr(__a , "set_timesteps" ):
                __lowerCAmelCase = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            __lowerCAmelCase = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
            __lowerCAmelCase = dummy_past_residuals[: scheduler.config.solver_order]
            __lowerCAmelCase = scheduler.timesteps[5]
            __lowerCAmelCase = scheduler.timesteps[6]
            __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
            __lowerCAmelCase = scheduler.step(__a , __a , __a , **__a ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def snake_case ( self ):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        __lowerCAmelCase = DEISMultistepScheduler(**self.get_scheduler_config() )
        __lowerCAmelCase = self.full_loop(scheduler=__a )
        __lowerCAmelCase = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3
        __lowerCAmelCase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        __lowerCAmelCase = DPMSolverMultistepScheduler.from_config(scheduler.config )
        __lowerCAmelCase = UniPCMultistepScheduler.from_config(scheduler.config )
        __lowerCAmelCase = DEISMultistepScheduler.from_config(scheduler.config )
        __lowerCAmelCase = self.full_loop(scheduler=__a )
        __lowerCAmelCase = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3

    def snake_case ( self ):
        # Sweep the training-timestep count.
        for timesteps in [25, 50, 1_00, 9_99, 10_00]:
            self.check_over_configs(num_train_timesteps=__a )

    def snake_case ( self ):
        # Sweep thresholding-related config combinations.
        self.check_over_configs(thresholding=__a )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type="deis" , solver_order=__a , solver_type=__a , )

    def snake_case ( self ):
        # Sweep supported prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def snake_case ( self ):
        # Sweep solver configurations and ensure no NaNs in a full loop.
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , )
                        __lowerCAmelCase = self.full_loop(
                            solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , )
                        assert not torch.isnan(__a ).any(), "Samples have nan numbers"

    def snake_case ( self ):
        # Both settings of lower_order_final should be valid.
        self.check_over_configs(lower_order_final=__a )
        self.check_over_configs(lower_order_final=__a )

    def snake_case ( self ):
        # Sweep the inference-step count.
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
            self.check_over_forward(num_inference_steps=__a , time_step=0 )

    def snake_case ( self ):
        # Regression value for the default full loop.
        __lowerCAmelCase = self.full_loop()
        __lowerCAmelCase = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3

    def snake_case ( self ):
        # Regression value for v-prediction.
        __lowerCAmelCase = self.full_loop(prediction_type="v_prediction" )
        __lowerCAmelCase = torch.mean(torch.abs(__a ) )
        assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3

    def snake_case ( self ):
        # fp16 inputs must stay fp16 through the whole loop.
        __lowerCAmelCase = self.scheduler_classes[0]
        __lowerCAmelCase = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 )
        __lowerCAmelCase = scheduler_class(**__a )
        __lowerCAmelCase = 10
        __lowerCAmelCase = self.dummy_model()
        __lowerCAmelCase = self.dummy_sample_deter.half()
        scheduler.set_timesteps(__a )
        for i, t in enumerate(scheduler.timesteps ):
            __lowerCAmelCase = model(__a , __a )
            __lowerCAmelCase = scheduler.step(__a , __a , __a ).prev_sample
        assert sample.dtype == torch.floataa
| 57
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
    """Test-suite for ``ChineseCLIPProcessor`` (tokenizer + image processor).

    Exercises save/load round trips, kwarg overrides on ``from_pretrained``,
    image/text feature parity with the underlying components, batch decoding
    and ``model_input_names``.

    NOTE(review): mangled by an automated rewrite -- locals are all named
    ``__lowerCAmelCase`` while bodies read the intended names, every method is
    named ``snake_case`` (later defs shadow earlier ones), and ``np.uinta`` /
    the ``__a`` placeholders below look like corrupted identifiers.  Compare
    with the upstream ChineseCLIPProcessorTest before relying on it.
    """

    def snake_case ( self ):
        # setUp: write a tiny vocab file and an image-processor config to a
        # temporary directory so components can be loaded from disk.
        __lowerCAmelCase = tempfile.mkdtemp()
        __lowerCAmelCase = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        __lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        __lowerCAmelCase = {
            "do_resize": True,
            "size": {"height": 2_24, "width": 2_24},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
            "do_convert_rgb": True,
        }
        __lowerCAmelCase = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(__a , __a )

    def snake_case ( self , **__a ):
        # Helper: slow tokenizer loaded from the temp dir.
        return BertTokenizer.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        # Helper: fast tokenizer loaded from the temp dir.
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self , **__a ):
        # Helper: image processor loaded from the temp dir.
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )

    def snake_case ( self ):
        # tearDown: remove the temporary directory.
        shutil.rmtree(self.tmpdirname )

    def snake_case ( self ):
        # Helper: a single random RGB PIL image as processor input.
        __lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        __lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def snake_case ( self ):
        # Processor save/load round trip with both slow and fast tokenizers.
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = self.get_rust_tokenizer()
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )

    def snake_case ( self ):
        # from_pretrained with extra kwargs must override saved component config.
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowerCAmelCase = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        __lowerCAmelCase = self.get_image_processor(do_normalize=__a )
        __lowerCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=__a )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )

    def snake_case ( self ):
        # Image features through the processor must match the raw image processor.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = image_processor(__a , return_tensors="np" )
        __lowerCAmelCase = processor(images=__a , return_tensors="np" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    def snake_case ( self ):
        # Text encoding through the processor must match the raw tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = processor(text=__a )
        __lowerCAmelCase = tokenizer(__a )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def snake_case ( self ):
        # Joint text+image call returns all expected keys; empty call raises.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()

    def snake_case ( self ):
        # batch_decode must delegate to the tokenizer.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCAmelCase = processor.batch_decode(__a )
        __lowerCAmelCase = tokenizer.batch_decode(__a )
        self.assertListEqual(__a , __a )

    def snake_case ( self ):
        # Output keys must agree with processor.model_input_names.
        __lowerCAmelCase = self.get_image_processor()
        __lowerCAmelCase = self.get_tokenizer()
        __lowerCAmelCase = ChineseCLIPProcessor(tokenizer=__a , image_processor=__a )
        __lowerCAmelCase = "Alexandra,T-shirt的价格是15便士。"
        __lowerCAmelCase = self.prepare_image_inputs()
        __lowerCAmelCase = processor(text=__a , images=__a )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 57
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Output container for Stable Diffusion pipelines: the generated images
    plus an optional per-image NSFW flag list.

    NOTE(review): both fields below carry the same (mangled) name, so the
    second annotation overwrites the first and the dataclass ends up with a
    single field -- confirm the intended field names (``images`` /
    ``nsfw_content_detected`` in the reference implementation).
    """

    __UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
    __UpperCAmelCase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Flax pipeline output dataclass (images array + per-image NSFW flags).

    NOTE(review): both fields are declared under the same name
    `__UpperCAmelCase`, so the second annotation overwrites the first in the
    dataclass -- obfuscation damage; confirm the intended field names
    (presumably `images` / `nsfw_content_detected`).
    """
    __UpperCAmelCase : np.ndarray
    __UpperCAmelCase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 57
|
"""simple docstring"""
from __future__ import annotations
def _lowerCamelCase ( _UpperCamelCase = 4 ):
'''simple docstring'''
__lowerCAmelCase = abs(_UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(_UpperCamelCase )] for y in range(_UpperCamelCase )]
def _lowerCamelCase(_UpperCamelCase):
    """Rotate the matrix 90 degrees counterclockwise.

    NOTE(review): `reverse_row` and `transpose` are not defined under those
    names in this file (every helper here is named `_lowerCamelCase`), so
    this raises NameError at runtime -- confirm the intended helper names.
    """
    return reverse_row(transpose(_UpperCamelCase ) )
    # OR.. transpose(reverse_column(matrix))
def _lowerCamelCase(_UpperCamelCase):
    """Rotate the matrix 180 degrees.

    NOTE(review): `reverse_row` and `reverse_column` are not defined under
    those names in this file (every helper here is named `_lowerCamelCase`),
    so this raises NameError at runtime -- confirm the intended helper names.
    """
    return reverse_row(reverse_column(_UpperCamelCase ) )
    # OR.. reverse_column(reverse_row(matrix))
def _lowerCamelCase(_UpperCamelCase):
    """Rotate the matrix 270 degrees counterclockwise (90 clockwise).

    NOTE(review): `reverse_column` and `transpose` are not defined under
    those names in this file (every helper here is named `_lowerCamelCase`),
    so this raises NameError at runtime -- confirm the intended helper names.
    """
    return reverse_column(transpose(_UpperCamelCase ) )
    # OR.. transpose(reverse_row(matrix))
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [list(_UpperCamelCase ) for x in zip(*_UpperCamelCase )]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = matrix[::-1]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [x[::-1] for x in matrix]
return matrix
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
for i in matrix:
print(*_UpperCamelCase )
if __name__ == "__main__":
    # Demo: build a fresh matrix and print each rotation.
    # NOTE(review): `make_matrix`, `print_matrix`, `rotate_aa`, `rotate_aaa`,
    # the name `matrix`, and the annotation `Dict` are not defined in this
    # file (every helper is named `_lowerCamelCase` and results are bound to
    # `A`), so this demo raises NameError -- confirm the intended names.
    A : Dict = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_aa(matrix))
    A : List[str] = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_aaa(matrix))
    A : str = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_aaa(matrix))
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a = 0 ):
__lowerCAmelCase = key
def snake_case ( self , __a , __a ):
assert isinstance(__a , __a ) and isinstance(__a , __a )
__lowerCAmelCase = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(__a ) ^ key ) for ch in content]
def snake_case ( self , __a , __a ):
assert isinstance(__a , __a ) and isinstance(__a , __a )
__lowerCAmelCase = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(__a ) ^ key ) for ch in content]
def snake_case ( self , __a , __a = 0 ):
assert isinstance(__a , __a ) and isinstance(__a , __a )
__lowerCAmelCase = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
__lowerCAmelCase = ""
for ch in content:
ans += chr(ord(__a ) ^ key )
return ans
def snake_case ( self , __a , __a = 0 ):
assert isinstance(__a , __a ) and isinstance(__a , __a )
__lowerCAmelCase = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
__lowerCAmelCase = ""
for ch in content:
ans += chr(ord(__a ) ^ key )
return ans
def snake_case ( self , __a , __a = 0 ):
assert isinstance(__a , __a ) and isinstance(__a , __a )
try:
with open(__a ) as fin, open("encrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__a , __a ) )
except OSError:
return False
return True
def snake_case ( self , __a , __a ):
assert isinstance(__a , __a ) and isinstance(__a , __a )
try:
with open(__a ) as fin, open("decrypt.out" , "w+" ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(__a , __a ) )
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 57
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
    """Pipeline tests for `text2text-generation` (seq2seq models).

    NOTE(review): obfuscation damage throughout -- results are bound to a
    throwaway local `__lowerCAmelCase` while later lines read names such as
    `generator` / `outputs`, and the first method declares duplicate `__a`
    parameters (a SyntaxError). Left byte-identical pending reconstruction.
    """
    __UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    __UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    # Builds the pipeline under test plus sample inputs for the common runner.
    def snake_case ( self , __a , __a , __a ):
        __lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
        return generator, ["Something to write", "Something else"]

    # Exercises single/batched generation and the invalid-input error path.
    def snake_case ( self , __a , __a ):
        __lowerCAmelCase = generator("Something there" )
        self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
        __lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        __lowerCAmelCase = generator(
            ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
        self.assertEqual(
            __a , [
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
                [{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
            ] , )
        with self.assertRaises(__a ):
            generator(4 )

    # PyTorch backend: greedy/beam generation with a tiny random T5.
    @require_torch
    def snake_case ( self ):
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
        __lowerCAmelCase = 3
        __lowerCAmelCase = generator(
            "Something there" , num_return_sequences=__a , num_beams=__a , )
        __lowerCAmelCase = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(__a , __a )
        __lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
        self.assertEqual(
            __a , [
                {"generated_token_ids": ANY(torch.Tensor )},
                {"generated_token_ids": ANY(torch.Tensor )},
            ] , )
        __lowerCAmelCase = generator.model.config.eos_token_id
        __lowerCAmelCase = "<pad>"
        __lowerCAmelCase = generator(
            ["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
        self.assertEqual(
            __a , [
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor )},
                    {"generated_token_ids": ANY(torch.Tensor )},
                ],
            ] , )

    # TensorFlow backend: smoke test of deterministic generation.
    @require_tf
    def snake_case ( self ):
        __lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
        # do_sample=False necessary for reproducibility
        __lowerCAmelCase = generator("Something there" , do_sample=__a )
        self.assertEqual(__a , [{"generated_text": ""}] )
| 57
| 1
|
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _lowerCamelCase(main_process_only=True, *args, **kwargs):
    """Wrap `tqdm.auto.tqdm`, optionally showing the bar only on the main process.

    Original bugs: all three parameters shared the name `_UpperCamelCase`
    (a SyntaxError -- names reconstructed from the body's use of
    `main_process_only` and the pass-through to `_tqdm`), and the disable
    flag compared `== 0`, which would hide the bar on the main process and
    show it everywhere else.

    Args:
        main_process_only: when True, disable the bar on every process
            whose local rank is not 0.
        *args, **kwargs: forwarded to `tqdm.auto.tqdm`.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Show the bar only on local rank 0; disable on all other processes.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
| 57
|
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _UpperCamelCase(pl.LightningModule):
    """Minimal LightningModule holding a Longformer encoder plus a 2-label QA head.

    Exists only so a Lightning checkpoint's state_dict can be loaded; the
    conversion function below reads `lightning_model.model` and
    `lightning_model.qa_outputs`.

    Original bug: the constructor bound every attribute to a throwaway
    local, then read the never-set `self.model` / `self.num_labels` when
    building the Linear head (AttributeError).
    """

    def __init__(self, __a):
        super().__init__()
        # The wrapped Longformer encoder.
        self.model = __a
        # Start/end logits for extractive QA.
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    def snake_case(self):
        # Forward pass intentionally unimplemented: only the state_dict matters.
        pass
def _lowerCamelCase(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    """Convert a PyTorch-Lightning Longformer-QA checkpoint to a HF
    LongformerForQuestionAnswering and save it.

    Original bugs: all three parameters shared the name `_UpperCamelCase`
    (a SyntaxError -- names reconstructed from the argparse block below),
    and every intermediate was bound to a throwaway local while later lines
    read `lightning_model` / `longformer_for_qa`.

    NOTE(review): `LightningModel` is not defined under that name in this
    file (the wrapper class above is `_UpperCamelCase`) -- confirm the
    intended class name.
    """
    # load the base encoder and wrap it the way the checkpoint expects
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)
    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)
    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion above.
    # NOTE(review): `convert_longformer_qa_checkpoint_to_pytorch` is not
    # defined under that name in this file (the conversion function above is
    # `_lowerCamelCase`) -- confirm the intended function name.
    A : Tuple = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    # NOTE(review): the parser instance was bound to `A`, so `parser` and
    # `args` are undefined names here -- obfuscation damage.
    A : Optional[int] = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 57
| 1
|
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
# NOTE(review): the four module constants below are all bound to the same
# name `A` (each overwriting the previous), yet later code reads `FPaa`,
# `BFaa` and `dtypes` -- obfuscation damage; confirm the intended names
# (model id, FP16, BF16, dtypes list).
A : int = "bert-base-cased"
A : str = "fp16"
A : List[Any] = "bf16"
A : str = [FPaa, BFaa]
@require_fsdp
@require_cuda
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Unit tests for FullyShardedDataParallelPlugin env-var parsing.

    NOTE(review): obfuscation damage throughout -- values are bound to a
    throwaway local `__lowerCAmelCase` where named env entries / plugin
    instances are then read (e.g. `self.dist_env`, `fsdp_plugin`), and
    several reads use the placeholder `__a`. Left byte-identical pending
    reconstruction.
    """

    # Builds the mock single-process distributed environment used by every test.
    def snake_case ( self ):
        super().setUp()
        __lowerCAmelCase = dict(
            ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )

    # Sharding strategy is parsed from its env var as a 1-based enum value.
    def snake_case ( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(__a ):
            __lowerCAmelCase = self.dist_env.copy()
            __lowerCAmelCase = f"{i + 1}"
            __lowerCAmelCase = strategy
            with mockenv_context(**__a ):
                __lowerCAmelCase = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )

    # Backward prefetch parsing; NO_PREFETCH maps to None.
    def snake_case ( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(__a ):
            __lowerCAmelCase = self.dist_env.copy()
            __lowerCAmelCase = prefetch_policy
            with mockenv_context(**__a ):
                __lowerCAmelCase = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )

    # State-dict type parsing; FULL_STATE_DICT enables CPU offload + rank0-only.
    def snake_case ( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(__a ):
            __lowerCAmelCase = self.dist_env.copy()
            __lowerCAmelCase = state_dict_type
            with mockenv_context(**__a ):
                __lowerCAmelCase = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )

    # Auto-wrap policy parsing, including the two error/None edge cases.
    def snake_case ( self ):
        __lowerCAmelCase = AutoModel.from_pretrained(__a )
        for policy in FSDP_AUTO_WRAP_POLICY:
            __lowerCAmelCase = self.dist_env.copy()
            __lowerCAmelCase = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                __lowerCAmelCase = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                __lowerCAmelCase = "2000"
            with mockenv_context(**__a ):
                __lowerCAmelCase = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(__a )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )
        __lowerCAmelCase = self.dist_env.copy()
        __lowerCAmelCase = "TRANSFORMER_BASED_WRAP"
        __lowerCAmelCase = "T5Layer"
        with mockenv_context(**__a ):
            __lowerCAmelCase = FullyShardedDataParallelPlugin()
            with self.assertRaises(__a ) as cm:
                fsdp_plugin.set_auto_wrap_policy(__a )
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )
        __lowerCAmelCase = self.dist_env.copy()
        __lowerCAmelCase = "SIZE_BASED_WRAP"
        __lowerCAmelCase = "0"
        with mockenv_context(**__a ):
            __lowerCAmelCase = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(__a )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )

    # Mixed-precision env parsing and the matching grad-scaler selection.
    def snake_case ( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            __lowerCAmelCase = self.dist_env.copy()
            __lowerCAmelCase = mp_dtype
            with mockenv_context(**__a ):
                __lowerCAmelCase = Accelerator()
                if mp_dtype == "fp16":
                    __lowerCAmelCase = torch.floataa
                elif mp_dtype == "bf16":
                    __lowerCAmelCase = torch.bfloataa
                __lowerCAmelCase = MixedPrecision(param_dtype=__a , reduce_dtype=__a , buffer_dtype=__a )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , __a )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler , __a ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
                AcceleratorState._reset_state(__a )

    # CPU-offload flag parsing (true/false strings).
    def snake_case ( self ):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            __lowerCAmelCase = self.dist_env.copy()
            __lowerCAmelCase = str(__a ).lower()
            with mockenv_context(**__a ):
                __lowerCAmelCase = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=__a ) )
@require_fsdp
@require_multi_gpu
@slow
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Slow multi-GPU FSDP integration tests driven via `accelerate launch`
    subprocesses.

    NOTE(review): obfuscation damage throughout -- setUp binds every config
    value to a throwaway local `__lowerCAmelCase` while the test methods
    read `self.performance_configs`, `self.peak_memory_usage_upper_bound`,
    `self.n_train`, `self.n_val`, `self.test_scripts_folder`, `cmd`,
    `cmd_config`, etc. Left byte-identical pending reconstruction.
    """

    # Configures thresholds, config names, and the external-deps script folder.
    def snake_case ( self ):
        super().setUp()
        __lowerCAmelCase = 0.8_2
        __lowerCAmelCase = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        __lowerCAmelCase = {
            "multi_gpu_fp16": 32_00,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 20_00,
            "fsdp_full_shard_transformer_based_wrap_fp16": 19_00,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang
        }
        __lowerCAmelCase = 1_60
        __lowerCAmelCase = 1_60
        __lowerCAmelCase = inspect.getfile(accelerate.test_utils )
        __lowerCAmelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )

    # Launches test_performance.py for each sharding/precision/wrap combination.
    def snake_case ( self ):
        __lowerCAmelCase = os.path.join(self.test_scripts_folder , "test_performance.py" )
        __lowerCAmelCase = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            __lowerCAmelCase = cmd.copy()
            for i, strategy in enumerate(__a ):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}" )
                    break
            if "fp32" in config:
                cmd_config.append("--mixed_precision=no" )
            else:
                cmd_config.append("--mixed_precision=fp16" )
            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True" )
            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" )
                    break
            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000" )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__a , env=os.environ.copy() )

    # Trains one epoch per FULL_SHARD state-dict type, then resumes from it.
    def snake_case ( self ):
        __lowerCAmelCase = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
        __lowerCAmelCase = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(__a ):
            __lowerCAmelCase = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}" )
            if strategy != "FULL_SHARD":
                continue
            __lowerCAmelCase = len(__a )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                __lowerCAmelCase = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}" )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__a , env=os.environ.copy() )
                __lowerCAmelCase = cmd_config[:-1]
                __lowerCAmelCase = os.path.join(self.tmpdir , "epoch_0" )
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(__a , env=os.environ.copy() )

    # Launches test_peak_memory_usage.py per spec with a memory upper bound.
    def snake_case ( self ):
        __lowerCAmelCase = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
        __lowerCAmelCase = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            __lowerCAmelCase = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"] )
            else:
                cmd_config.extend(["--mixed_precision=no"] )
            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"] )
                for i, strategy in enumerate(__a ):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i+1}" )
                        break
                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True" )
                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}" )
                        break
                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000" )
            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(__a , env=os.environ.copy() )
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = False
while is_sorted is False: # Until all the indices are traversed keep looping
__lowerCAmelCase = True
for i in range(0 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
for i in range(1 , len(_UpperCamelCase ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
__lowerCAmelCase , __lowerCAmelCase = input_list[i + 1], input_list[i]
# swapping if elements not in order
__lowerCAmelCase = False
return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    A : Union[str, Any] = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    # NOTE(review): `odd_even_sort`, `input_list` and `sorted_list` are not
    # defined under those names in this file (the sort above is
    # `_lowerCamelCase` and the parsed input was bound to `A`) -- confirm
    # intended names before running.
    A : str = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 57
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# NOTE(review): both TypeVars are bound to the same name `A` (the second
# overwrites the first), while the generic classes below use `KT` and `VT`,
# which are therefore never defined -- obfuscation damage; the intended form
# is presumably `KT = TypeVar("KT")` / `VT = TypeVar("VT")`.
A : Optional[int] = TypeVar("KT")
A : Optional[Any] = TypeVar("VT")
class _UpperCamelCase(Generic[KT, VT]):
    """Skip-list node holding a key/value pair and per-level forward links.

    Original bugs: `__init__` declared both parameters as `__a` (a
    SyntaxError) and bound them to throwaway locals, so the attributes read
    by `__repr__` and the level property were never set. Parameter names
    are reconstructed from those attribute reads.
    """

    def __init__(self, key="root", value=None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i; len(forward) == node level.
        self.forward = []

    def __repr__(self):
        return f"Node({self.key}: {self.value})"

    @property
    def snake_case(self):
        """Node level, i.e. the number of forward links."""
        return len(self.forward)
class _UpperCamelCase ( Generic[KT, VT] ):
    """Probabilistic skip list mapping keys to values.

    NOTE(review): obfuscation damage throughout -- `__init__` and the
    insert method declare two parameters both named `__a` (SyntaxErrors),
    and every assignment is bound to a throwaway local `__lowerCAmelCase`
    while later lines read names such as `self.head`, `self.level`, `p`,
    `max_level`, `node`, `lines`, `level`, `new_node`. The internal calls
    `self._locate_node` / `self.random_level` also do not match the method
    names here (all `snake_case`). Left byte-identical pending
    reconstruction.
    """

    def __init__( self , __a = 0.5 , __a = 16 ):
        __lowerCAmelCase = Node[KT, VT]()
        __lowerCAmelCase = 0
        __lowerCAmelCase = p
        __lowerCAmelCase = max_level

    # ASCII rendering of the lanes, one row per node plus a trailing "None".
    def __str__( self ):
        __lowerCAmelCase = list(self )
        if len(__a ) == 0:
            return f"SkipList(level={self.level})"
        __lowerCAmelCase = max((len(str(__a ) ) for item in items) , default=4 )
        __lowerCAmelCase = max(__a , 4 ) + 4
        __lowerCAmelCase = self.head
        __lowerCAmelCase = []
        __lowerCAmelCase = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(__a , "-" ) + "* " * len(__a ) )
        lines.append(" " * label_size + "| " * len(__a ) )
        while len(node.forward ) != 0:
            __lowerCAmelCase = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(__a , "-" )
                + " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
            lines.append(" " * label_size + "| " * len(__a ) )
            __lowerCAmelCase = node.forward
        lines.append("None".ljust(__a ) + "* " * len(__a ) )
        return f"SkipList(level={self.level})\n" + "\n".join(__a )

    # Yield keys in order by walking the bottom (level-0) lane.
    def __iter__( self ):
        __lowerCAmelCase = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            __lowerCAmelCase = node.forward[0]

    # Geometric level draw with probability p, capped at max_level.
    def snake_case ( self ):
        __lowerCAmelCase = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def snake_case ( self , __a ):
        """Locate `key`; returns (node-or-None, per-level update vector)."""
        __lowerCAmelCase = []
        __lowerCAmelCase = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            # or equal to searched key would result
            # in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                __lowerCAmelCase = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(__a )
        update_vector.reverse() # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        # references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        # if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def snake_case ( self , __a ):
        """Remove `key` if present, repairing forward links at every level."""
        __lowerCAmelCase , __lowerCAmelCase = self._locate_node(__a )
        if node is not None:
            for i, update_node in enumerate(__a ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        __lowerCAmelCase = node.forward[i]
                    else:
                        __lowerCAmelCase = update_node.forward[:i]

    def snake_case ( self , __a , __a ):
        """Insert `key` with `value`, overwriting if the key already exists."""
        __lowerCAmelCase , __lowerCAmelCase = self._locate_node(__a )
        if node is not None:
            __lowerCAmelCase = value
        else:
            __lowerCAmelCase = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , __a ):
                    update_vector.append(self.head )
                __lowerCAmelCase = level
            __lowerCAmelCase = Node(__a , __a )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(__a )
                else:
                    __lowerCAmelCase = new_node

    def snake_case ( self , __a ):
        """Return the value stored under `key`, or None when absent."""
        __lowerCAmelCase , __lowerCAmelCase = self._locate_node(__a )
        if node is not None:
            return node.value
        return None
def _lowerCamelCase():
    """Insert four distinct keys and verify a level-0 walk sees all of them.

    Original bug: the walk's node advance and dict insert were bound to a
    throwaway local, so the loop never progressed and `all_values`
    (undefined) was never filled. Local names reconstructed from the
    assertions below.

    NOTE(review): `SkipList` is not defined under that name in this file
    (the class above is named `_UpperCamelCase`) -- confirm intended name.
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def _lowerCamelCase():
    """Re-inserting an existing key overwrites its value instead of duplicating.

    Original bug: the level-0 walk bound its advance and dict insert to a
    throwaway local, so `all_values` (undefined) was never filled.

    NOTE(review): `SkipList` is not defined under that name in this file --
    confirm intended name.
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)
    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)
    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def _lowerCamelCase():
    """find() on an empty skip list returns None.

    Original bug: the constructed list was bound to a throwaway local while
    the assertion read the undefined name `skip_list`.
    """
    skip_list = SkipList()
    assert skip_list.find("Some key") is None
def _lowerCamelCase():
    """find() returns stored values, honours overwrites, and None for misses.

    Original bug: the constructed list was bound to a throwaway local while
    the body read the undefined name `skip_list`.
    """
    skip_list = SkipList()
    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20
    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)
    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13
def _lowerCamelCase():
    """delete() on an empty skip list is a no-op (head keeps no links).

    Original bug: the constructed list was bound to a throwaway local while
    the body read the undefined name `skip_list`.
    """
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0
def _lowerCamelCase():
    """Deleted keys are no longer returned by find().

    Original bug: the constructed list was bound to a throwaway local while
    the body read the undefined name `skip_list`.
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None
def _lowerCamelCase():
    """Each delete removes only its own key; the others stay retrievable.

    Original bug: the constructed list was bound to a throwaway local while
    the body read the undefined name `skip_list`.
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)
    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15
    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None
def _lowerCamelCase():
    """After a delete, no forward pointer anywhere still reaches the removed node.

    Original bugs: the constructed list was bound to a throwaway local
    (`skip_list` undefined), and the inner traversal read the undefined
    name `node` and recursed on its own parameter instead of each forward
    node (infinite recursion).
    """
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)
    skip_list.delete("X")

    def traverse_keys(node):
        # Visit this node's key, then every node reachable from any level.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4
def _lowerCamelCase():
    """Iteration yields keys in sorted order, before and after mutations.

    Original bugs: the inner `is_sorted` zipped its parameter against the
    undefined name `lst`, the insert loop passed the undefined parameter
    placeholder instead of `i`, and the assertions listed an undefined
    name instead of the skip list.
    """
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def _lowerCamelCase():
    """Run the whole probabilistic skip-list test suite 100 times.

    NOTE(review): every test above is itself named `_lowerCamelCase`, so
    none of the `test_*` names called here are defined in this file --
    confirm the intended function names before running.
    """
    for _ in range(100 ):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()
def _lowerCamelCase():
    """Demo: build a skip list with a duplicated key, delete it, print the list.

    Original bug: the constructed list was bound to a throwaway local while
    the inserts read the undefined name `skip_list`, and the print received
    the wrong (module-level) name instead of the list.
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")
    skip_list.delete(4)
    print(skip_list)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is not defined under that name in this file (the
    # demo above is `_lowerCamelCase`) -- confirm the intended name.
    main()
| 57
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase(ProcessorMixin):
    """Processor bundling a CLIP image processor and an XLM-Roberta tokenizer
    behind a single `__call__` (AltCLIP-style).

    NOTE(review): reconstructed from an unparseable original -- `__init__`
    declared duplicate `__a` parameters (SyntaxError), all three
    ProcessorMixin class attributes were bound to the same name, the
    decode/batch_decode methods and the property all shared one name, and
    intermediate results were discarded into throwaway locals. Attribute
    and method names follow the ProcessorMixin conventions the body itself
    relies on -- verify against upstream.
    """

    # ProcessorMixin wiring: which sub-processors exist and their classes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated keyword as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; returns a BatchEncoding."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # Merge the image tensors into the text encoding.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Order-preserving, de-duplicated union of both sub-processors' input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 57
| 1
|
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """Builds a tiny ALBERT config plus matching dummy inputs and asserts
    model-output shapes for every ALBERT head.

    Fix: the obfuscated version had duplicate ``__a`` parameter names
    (SyntaxError) and collapsed every local into one mangled name while later
    code referenced the real names (``input_ids``, ``config``, ...); the class
    is instantiated as ``AlbertModelTester(self)`` below, so it carries that
    name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels sized by the tester's dimensions."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny AlbertConfig matching the tester's hyper-parameters."""
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the bare encoder with/without masks and check output shapes."""
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check MLM + sentence-order-prediction head output shapes."""
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check masked-LM head output shape."""
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check span start/end logits shapes for the QA head."""
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check sequence-classification logits shape."""
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check per-token classification logits shape."""
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check multiple-choice logits shape (inputs tiled over num_choices)."""
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the common ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for ALBERT models.

    Fix: the obfuscated version inherited from the undefined name
    ``lowerCAmelCase__``, mangled the class attributes the mixins read
    (``all_model_classes``/``pipeline_model_mapping``/``fx_compatible``) and
    named every test ``snake_case`` so unittest never discovered them.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # Special case for the ForPreTraining model: it needs the extra
    # sentence-order label in addition to the MLM labels.
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class _UpperCamelCase(unittest.TestCase):
    """Slow integration test against the released albert-base-v2 checkpoint.

    Fix: the obfuscated version assigned the model/inputs/expected values to
    one throwaway local and then called the undefined names ``model`` and
    ``output``; the test method is renamed so unittest discovers it.
    """

    @slow
    def test_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        # atol=1e-4: checkpoint activations are only reproducible to ~4 decimals.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 57
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class _UpperCamelCase:
    """Placeholder class whose only purpose is to exercise the
    ``require_onnxruntime`` decorator (skipped when onnxruntime is absent).

    NOTE(review): the body is intentionally empty — presumably the real tests
    were stripped or live elsewhere; confirm against the original module.
    """

    pass
| 57
| 1
|
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps fairseq WavLM parameter-name fragments (keys) to their HF WavLM
# counterparts (values). A "*" is replaced by the encoder layer index at
# conversion time. The functions below reference these exact names
# (`logger`, `MAPPING`), so the obfuscated triple reassignment of `A` broke
# the whole script.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}

# Weights that live at the top level of the HF model (no nested prefix).
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module attribute addressed by dotted `key`.

    `weight_type` selects which leaf tensor ("weight", "weight_g",
    "weight_v", "bias" or None for the module itself) receives the data.
    The shape is asserted first so a mapping mistake fails loudly.
    Fix: the obfuscated version had duplicate parameter names (SyntaxError)
    and referenced the undefined locals `hf_pointer`/`hf_shape`.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    """Walk the fairseq state dict and copy every tensor into `hf_model`.

    Conv feature-extractor weights are routed through `load_conv_layer`; all
    other weights are matched against `MAPPING` and written with
    `set_recursively`. Anything unmatched is collected and logged.
    Fix: restores the local names (`unused_weights`, `is_used`, ...) that the
    obfuscation collapsed into one variable while still referencing them.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The fragment before `key` ends with the layer index.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor into the HF feature extractor.

    The fairseq name encodes `layer_id.type_id`: type 0 is the conv itself,
    type 2 is its layer norm (only present on layer 0 when group norm is
    used). Unrecognized names are appended to `unused_weights`.
    Fix: restores `name`/`items`/`layer_id`/`type_id` and the actual
    destination assignments (the obfuscated version discarded `value` into a
    throwaway local instead of writing the model tensors).
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into a HF `WavLMModel` directory.

    `config_path` optionally points at an HF config.json; otherwise defaults
    are used. The function is referenced by this exact name in the
    ``__main__`` block, which the obfuscation had broken.
    """
    # Load the original fairseq model from the checkpoint.
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    # Fix: the obfuscated version bound the parser and the parsed namespace to
    # the same throwaway name `A` while referencing `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 57
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    """Min-heap that tracks, per graph vertex, where that vertex currently
    sits inside the heap array (``node_position``), so Prim's algorithm can
    decrease keys in O(log n).

    Fix: the obfuscated version named every method ``snake_case`` (each def
    overwrote the previous) while the code calls ``get_position``,
    ``set_position``, ``top_to_bottom``, ``bottom_to_top``, ``heapify`` and
    ``delete_minimum``, and it dropped all index assignments into one
    throwaway local.
    """

    def __init__(self):
        # node_position[vertex] -> index of that vertex inside the heap array.
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                # Keep the vertex -> heap-index bookkeeping in sync with the swap.
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Bubble the new key `val` at `index` up to its proper place
        (the decrease-key operation)."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            # Reached the root without breaking: the new key is the minimum.
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place (bottom-up sift-down)."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the smallest key; the root key is
        replaced by ``sys.maxsize`` and sifted down."""
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Prim's minimum-spanning-tree algorithm.

    `adjacency_list` maps each vertex index to a list of [neighbor, weight]
    pairs. Returns the MST as (parent_vertex, vertex) edges, grown from
    vertex 0. The ``__main__`` block calls this exact name, which the
    obfuscation had broken.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    # Decrease-key: this edge is now the cheapest way to reach
                    # `neighbor` from the partial tree.
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    # Read an edge list from stdin ("u v w" per line) into an undirected
    # adjacency list, then print the MST edges.
    # Fix: the obfuscated version bound everything to `A` while referencing
    # `edges_number`, `adjacency_list` and `edge`.
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 57
| 1
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """Build the argparse parser for ``accelerate env``.

    When `subparsers` is given (top-level CLI dispatch), register as an "env"
    subcommand and set `env_command` as its handler; otherwise build a
    standalone parser. Fix: restores the `parser` local (the obfuscated
    version assigned it to a throwaway name) and the `None` default /
    `env_command` handler that were blanked out.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """Collect and print environment info for a GitHub issue report.

    Returns the info dict (versions, platform, accelerator availability, and
    the current Accelerate config). Fix: restores the locals
    (`pt_version`, `accelerate_config`, `info`, ...) that the obfuscation
    collapsed, and the `default_config_file` fallback check.
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main():
    """CLI entry point for ``accelerate env``; returns the exit status (0)."""
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
    # main() returns 0 on success; SystemExit propagates it as the exit code.
    raise SystemExit(main())
| 57
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Prefix rewrites applied (in order) to every key of the original VisualBERT
# state dict; `get_new_dict` below takes this list as its default argument,
# and the conversion entry point checks against ACCEPTABLE_CHECKPOINTS, so
# the obfuscated triple reassignment of `A` broke both.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# Checkpoint filenames this converter knows how to map to a model type.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a torch checkpoint onto the CPU and return its state dict."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rewrite an original VisualBERT state dict into HF naming.

    Detector weights are dropped, key prefixes are rewritten with
    `rename_keys_prefix`, `position_ids` is materialized from the config, and
    the decoder bias (absent in old BERT code) is aliased from
    `cls.predictions.bias`. Fix: restores `new_d`/`new_key` and the actual
    dict writes the obfuscation discarded.
    """
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT `.th` checkpoint to an HF model dir.

    The checkpoint filename determines both the config (visual embedding dim,
    label count) and which head class to instantiate. The ``__main__`` block
    calls this exact name. Fix: restores `model_type`/`config_params`/
    `config`/`state_dict`/`model`, all collapsed by the obfuscation.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point; fix: the obfuscated version bound the parser and the
    # parsed namespace to the same name `A` while referencing `parser`/`args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 57
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCamelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tests for the LED tokenizer (slow and fast variants).

    Fix: the obfuscated header inherited from the undefined name
    ``lowerCAmelCase__`` and collapsed the class attributes the mixin reads
    (``tokenizer_class``/``rust_tokenizer_class``/``test_rust_tokenizer``)
    into one mangled name.
    """

    # TokenizerTesterMixin reads these class attributes.
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True
def snake_case ( self ):
super().setUp()
__lowerCAmelCase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__lowerCAmelCase = dict(zip(__a , range(len(__a ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__lowerCAmelCase = {"unk_token": "<unk>"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def snake_case ( self , **__a ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , **__a ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def snake_case ( self , __a ):
return "lower newer", "lower newer"
@cached_property
def snake_case ( self ):
return LEDTokenizer.from_pretrained("allenai/led-base-16384" )
@cached_property
def snake_case ( self ):
return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__lowerCAmelCase = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__a , max_length=len(__a ) , padding=__a , return_tensors="pt" )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(__a , __a )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__a , padding=__a , return_tensors="pt" )
self.assertIn("input_ids" , __a )
self.assertIn("attention_mask" , __a )
self.assertNotIn("labels" , __a )
self.assertNotIn("decoder_attention_mask" , __a )
    @require_torch
    def snake_case ( self ):
        """`padding="max_length"` must pad target ids to the requested length (32)."""
        __lowerCAmelCase = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __lowerCAmelCase = tokenizer(text_target=__a , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )
    @require_torch
    def snake_case ( self ):
        """Over-long input must be truncated so the batch shape is (2, 5122)."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            # NOTE(review): `padding=__a`/`truncation=__a` look mangled
            # (upstream passes True for both) — confirm.
            __lowerCAmelCase = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] , padding=__a , truncation=__a , return_tensors="pt" )
            self.assertIsInstance(__a , __a )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )
    @require_torch
    def snake_case ( self ):
        """Both input and target encodings must start with BOS and end with EOS."""
        __lowerCAmelCase = ["A long paragraph for summarization."]
        __lowerCAmelCase = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __lowerCAmelCase = tokenizer(__a , return_tensors="pt" )
            __lowerCAmelCase = tokenizer(text_target=__a , return_tensors="pt" )
            __lowerCAmelCase = inputs["input_ids"]
            __lowerCAmelCase = targets["input_ids"]
            # First token is BOS, last is EOS, for inputs and labels alike.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def snake_case ( self ):
        """`tokenizer.pad` should carry a caller-supplied global_attention_mask through padding."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __lowerCAmelCase = ["Summary of the text.", "Another summary."]
            # Expected mask after padding: -1 marks padded positions.
            __lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __lowerCAmelCase = tokenizer(__a , padding=__a )
            # NOTE(review): `len(__a)` below looks mangled (upstream uses len(x)
            # per row of input_ids) — confirm.
            __lowerCAmelCase = [[0] * len(__a ) for x in encoded_output["input_ids"]]
            __lowerCAmelCase = tokenizer.pad(__a )
            self.assertSequenceEqual(outputs["global_attention_mask"] , __a )
    def snake_case ( self ):
        # NOTE(review): deliberately a no-op — presumably disables an inherited
        # base-class test that does not apply here; confirm against upstream.
        pass
    def snake_case ( self ):
        """Slow and fast tokenizers must encode a `<mask>`-bearing sentence identically."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
                __lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__a , **__a )
                __lowerCAmelCase = self.tokenizer_class.from_pretrained(__a , **__a )
                __lowerCAmelCase = "A, <mask> AllenNLP sentence."
                __lowerCAmelCase = tokenizer_r.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
                __lowerCAmelCase = tokenizer_p.encode_plus(__a , add_special_tokens=__a , return_token_type_ids=__a )
                # Same token-type and attention-mask totals for both backends.
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                __lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                __lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Exact id sequence and token strings, including the intact <mask>.
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    __a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    __a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 57
|
"""simple docstring"""
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Placeholder exception class used by the queues below.

    NOTE(review): the base `lowerCAmelCase__` is not defined in this module,
    and the queue code raises `OverflowError`/`UnderFlowError` — this class's
    original name appears to have been lost; confirm against upstream.
    '''

    pass
class _UpperCamelCase ( lowerCAmelCase__ ):
    '''Second placeholder exception class used by the queues below.

    NOTE(review): base name undefined in this module; presumably the
    underflow-error class whose original name was lost — confirm.
    '''

    pass
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = [
[],
[],
[],
]
def snake_case ( self , __a , __a ):
try:
if len(self.queues[priority] ) >= 1_00:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(__a )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def snake_case ( self ):
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self ):
return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
def snake_case ( self , __a ):
if len(self.queue ) == 1_00:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(__a )
def snake_case ( self ):
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
__lowerCAmelCase = min(self.queue )
self.queue.remove(__a )
return data
def __str__( self ):
return str(self.queue )
def _lowerCamelCase ( ):
    '''Demo driver: exercise the fixed-priority queue and print each step.

    NOTE(review): `FixedPriorityQueue` and the `_UpperCamelCase` passed to
    print below do not resolve as written — the original identifiers
    (presumably the queue class and `fpq`) appear lost; confirm.
    '''
    __lowerCAmelCase = FixedPriorityQueue()
    fpq.enqueue(0 , 10 )
    fpq.enqueue(1 , 70 )
    fpq.enqueue(0 , 100 )
    fpq.enqueue(2 , 1 )
    fpq.enqueue(2 , 5 )
    fpq.enqueue(1 , 7 )
    fpq.enqueue(2 , 4 )
    fpq.enqueue(1 , 64 )
    fpq.enqueue(0 , 128 )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(_UpperCamelCase )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def _lowerCamelCase ( ):
    '''Demo driver: exercise the element (min-value) priority queue and print each step.

    NOTE(review): `ElementPriorityQueue` and the `_UpperCamelCase` passed to
    print below do not resolve as written — original identifiers appear
    lost; confirm.
    '''
    __lowerCAmelCase = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(_UpperCamelCase )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): neither name is defined above as written (both demo
    # functions were mangled to the same identifier); confirm intended names.
    fixed_priority_queue()
    element_priority_queue()
| 57
| 1
|
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
A : Optional[Any] = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
A : Optional[int] = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
A : Any = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
    """Translation Edit Rate (TER) metric backed by sacrebleu.

    Bug fixes: the two methods shared one mangled name (the second shadowed
    the first) while ``datasets.Metric`` dispatches to ``_info``/``_compute``
    — those names are restored; the compute signature also repeated one
    parameter name six times (a SyntaxError), restored from the keywords
    forwarded to ``sacrebleu.TER``.
    """

    def _info(self):
        """Validate the sacrebleu version and describe the metric's inputs."""
        if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
                "https://github.com/jhclark/tercom",
            ] , )

    def _compute(self, predictions, references, normalized=False, ignore_punct=False, support_zh_ja_chars=False, case_sensitive=False):
        """Compute TER over *predictions*; each prediction needs the same number of references."""
        references_per_prediction = len(references[0] )
        if any(len(refs ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        # sacrebleu expects references transposed: one list per reference slot.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
        sb_ter = TER(
            normalized=normalized , no_punct=ignore_punct , asian_support=support_zh_ja_chars , case_sensitive=case_sensitive , )
        output = sb_ter.corpus_score(predictions , transformed_references )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 57
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
__lowerCAmelCase = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 57
| 1
|
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
A : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( lowerCAmelCase__ ):
    """Two-stage pipeline: CLIPSeg segments the region described by ``text``,
    and the resulting mask drives Stable Diffusion inpainting of ``prompt``.

    Bug fixes: ``__init__`` and ``__call__`` repeated a single mangled
    parameter name many times (a SyntaxError) — names restored from the
    ``register_modules`` call and the inpainting-pipeline keywords; the four
    helper methods shared one mangled name (shadowing each other, and the
    internal ``self.enable_attention_slicing`` call dangled), so their real
    names are restored; ``enable_attention_slicing`` computed the automatic
    slice size but then discarded it.
    """

    def __init__(
        self,
        segmentation_model,
        segmentation_processor,
        vae,
        text_encoder,
        tokenizer,
        unet,
        scheduler,
        safety_checker,
        feature_extractor,
    ):
        super().__init__()
        # Legacy scheduler configs: patch steps_offset to 1 with a deprecation.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config )
        # Legacy scheduler configs: force skip_prk_steps=True with a deprecation.
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config )
        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )

    def enable_attention_slicing(self, slice_size="auto"):
        """Enable sliced attention; ``"auto"`` uses half the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing(self):
        """Disable sliced attention (attention computed in one step)."""
        self.enable_attention_slicing(None )

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU via accelerate; each loads to GPU when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )
        device = torch.device("cuda" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the pipeline actually executes on (accounts for offload hooks)."""
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        text,
        height=5_12,
        width=5_12,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Segment the region named by *text* in *image*, then inpaint it with *prompt*."""
        # 1) Predict a soft segmentation mask for `text` with CLIPSeg.
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # 2) Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 57
|
"""simple docstring"""
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = 1
while len(_UpperCamelCase ) < 1e6:
constant.append(str(_UpperCamelCase ) )
i += 1
__lowerCAmelCase = "".join(_UpperCamelCase )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined above as written (the function's
    # original name appears lost); confirm intended name.
    print(solution())
| 57
| 1
|
"""simple docstring"""
import re
from filelock import FileLock
# Optional-dependency probe: record whether nltk can be imported.
# NOTE(review): the flag is stored in `A` but tested as NLTK_AVAILABLE below —
# the original identifier appears lost; confirm.
try:
    import nltk
    A : str = True
except (ImportError, ModuleNotFoundError):
    A : Optional[int] = False
if NLTK_AVAILABLE:
    # Download the sentence tokenizer once, guarded by a file lock so
    # concurrent workers don't race on the download.
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def _lowerCamelCase ( _UpperCamelCase ):
    '''Split the input text into sentences, one per line, via nltk.

    Bug fix: the original discarded ``re.sub``'s return value (strings are
    immutable), so the pegasus ``<n>`` newline token was never removed.
    '''
    _UpperCamelCase = re.sub("<n>" , "" , _UpperCamelCase )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_UpperCamelCase ) )
| 57
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
# Shared test fixtures: small Lena image (BGR) and its grayscale version.
# NOTE(review): both values are bound to `A`, yet the second line reads `img`
# — original identifiers appear lost; confirm.
A : Union[str, Any] = imread(R"digital_image_processing/image_data/lena_small.jpg")
A : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY)
def _lowerCamelCase ( ):
    '''convert_to_negative must produce at least one nonzero pixel.

    NOTE(review): `_UpperCamelCase`/`negative_img` do not resolve as written —
    original identifiers appear lost; confirm.
    '''
    __lowerCAmelCase = cn.convert_to_negative(_UpperCamelCase )
    # assert negative_img array for at least one True
    assert negative_img.any()
def _lowerCamelCase ( ):
    '''change_contrast must return a 100x100 RGB PIL image (checked via repr).

    NOTE(review): `_UpperCamelCase` looks mangled (presumably the `img`
    opened above); confirm.
    '''
    with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
        # Work around assertion for response
        assert str(cc.change_contrast(_UpperCamelCase , 110 ) ).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at" )
def _lowerCamelCase ( ):
    '''A 9x9 Gaussian kernel (sigma=1.4) must contain only nonzero entries.

    NOTE(review): the result is asserted as `resp` but bound to a mangled
    local; confirm names.
    '''
    __lowerCAmelCase = canny.gen_gaussian_kernel(9 , sigma=1.4 )
    # Assert ambiguous array
    assert resp.all()
def _lowerCamelCase ( ):
    '''Canny edge detection on the grayscale test image yields a non-empty edge map.

    NOTE(review): `canny_img`/`_UpperCamelCase`/`canny_array` look mangled;
    confirm names.
    '''
    __lowerCAmelCase = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
    # assert ambiguous array for all == True
    assert canny_img.all()
    __lowerCAmelCase = canny.canny(_UpperCamelCase )
    # assert canny array for at least one True
    assert canny_array.any()
def _lowerCamelCase ( ):
    '''Gaussian filtering (5x5, sigma=0.9) must produce only nonzero output.

    NOTE(review): `_UpperCamelCase` looks mangled (presumably the shared
    grayscale image fixture); confirm.
    '''
    assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def _lowerCamelCase ( ):
    '''Convolving with a Laplacian-style kernel must yield a non-empty result.

    NOTE(review): `_UpperCamelCase` operands look mangled (image, kernel,
    dtype); confirm.
    '''
    # Sharpening/Laplacian-style 3x3 kernel.
    __lowerCAmelCase = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    __lowerCAmelCase = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
    assert res.any()
def _lowerCamelCase ( ):
    '''Median filtering (3x3) must yield a non-empty result.

    NOTE(review): `_UpperCamelCase` looks mangled (presumably the grayscale
    fixture); confirm.
    '''
    assert med.median_filter(_UpperCamelCase , 3 ).any()
def _lowerCamelCase ( ):
    '''Sobel filtering must return non-empty gradient and direction arrays.

    NOTE(review): `_UpperCamelCase` looks mangled (presumably the grayscale
    fixture); confirm.
    '''
    __lowerCAmelCase , __lowerCAmelCase = sob.sobel_filter(_UpperCamelCase )
    assert grad.any() and theta.any()
def _lowerCamelCase ( ):
    '''Sepia conversion (factor 20) must produce only nonzero pixels.

    NOTE(review): `_UpperCamelCase`/`sepia` look mangled; confirm names.
    '''
    __lowerCAmelCase = sp.make_sepia(_UpperCamelCase , 20 )
    assert sepia.all()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" ):
    '''Burkes dithering (threshold 120) must produce a non-empty output image.

    NOTE(review): the result is used as `burkes` but bound to a mangled
    local; confirm names.
    '''
    __lowerCAmelCase = bs.Burkes(imread(_UpperCamelCase , 1 ) , 120 )
    burkes.process()
    assert burkes.output_img.any()
def _lowerCamelCase ( _UpperCamelCase = "digital_image_processing/image_data/lena_small.jpg" , ):
    '''Nearest-neighbour resize to 400x200 must produce a non-empty output.

    NOTE(review): the result is used as `nn` but bound to a mangled local;
    confirm names.
    '''
    __lowerCAmelCase = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 400 , 200 )
    nn.process()
    assert nn.output.any()
def _lowerCamelCase ( ):
    '''Local-binary-pattern pipeline: neighbour lookup works and the LBP image is non-empty.

    NOTE(review): several locals below (`image`, `x_coordinate`, `lbp_image`,
    ...) are referenced but bound to mangled names as written; confirm.
    '''
    __lowerCAmelCase = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    __lowerCAmelCase = imread(_UpperCamelCase , 0 )
    # Test for get_neighbors_pixel function() return not None
    __lowerCAmelCase = 0
    __lowerCAmelCase = 0
    __lowerCAmelCase = image[x_coordinate][y_coordinate]
    __lowerCAmelCase = lbp.get_neighbors_pixel(
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    __lowerCAmelCase = np.zeros((image.shape[0], image.shape[1]) )
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0 , image.shape[0] ):
        for j in range(0 , image.shape[1] ):
            __lowerCAmelCase = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
    assert lbp_image.any()
| 57
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Bug fixes: each backend's symbol list was bound to a throwaway module
# variable instead of a key of `_import_structure`, and the final line
# referenced the undefined `_import_structure` while dropping the
# `sys.modules[__name__] = ...` assignment that activates the lazy module.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Bug fixes: the structure dict and the torch-only symbol list were bound to
# throwaway module variables, and the final line referenced the undefined
# `_import_structure` while dropping the `sys.modules[__name__] = ...`
# assignment that activates the lazy module.
_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.