| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__lowercase = False
__lowercase = True
__lowercase = False
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
__lowercase = parser.parse_args()
__lowercase = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
__lowercase = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
__lowercase = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
__lowercase = reader.read()
__lowercase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
__lowercase = UNetaDModel(**config)
else:
__lowercase = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
__lowercase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__lowercase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__lowercase = config[key]
del config[key]
__lowercase = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
__lowercase = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
__lowercase = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
__lowercase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
__lowercase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
__lowercase = param_value
__lowercase = True
if not has_changed:
__lowercase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 40
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for this conversion script.
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an `ASTConfig` matching the architecture encoded in *model_name*.

    The time/frequency patch strides are parsed from the "NN-NN" fragment of the
    name, and the label mapping is fetched from the huggingface/label-files repo.

    Raises:
        ValueError: if the model name encodes an unsupported stride pair.
    """
    config = ASTConfig()

    if "10-10" in model_name:
        pass  # default strides
    elif "speech-commands" in model_name:
        # Speech-commands clips are 1 s, i.e. 128 mel frames instead of 1024.
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    """Map one original AST checkpoint key to its HF Transformers equivalent.

    The replacements are order-dependent (e.g. "module.v" must be rewritten
    before the block-level renames), so keep the sequence as-is.
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite *orig_state_dict* keys into the HF layout, in place, and return it.

    Fused "qkv" projections are split into separate query/key/value tensors
    (first `hidden_size` rows -> query, next -> key, last -> value); every
    other key is mapped through `rename_key`.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Keys look like "module.v.blocks.<layer>.attn.qkv.{weight,bias}".
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original classification heads from *state_dict*, in place.

    These heads are replaced by the freshly initialized HF classifier, so the
    pops are best-effort (missing keys are ignored).
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Download an original AST checkpoint, convert it to HF format, verify the
    logits on a reference audio clip, then optionally save and/or push it.

    Args:
        model_name: one of the supported "ast-finetuned-*" names below.
        pytorch_dump_folder_path: output directory, or None to skip saving.
        push_to_hub: whether to push model + feature extractor to MIT/<model_name>.

    Raises:
        ValueError: for an unknown model name or mismatching logits.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    # Reference logits[0, :3] produced by the original implementation.
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    """Holds the hyper-parameters used to build image-processor test inputs."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # noqa: B006 - kept for interface compatibility
        image_std=[0.5, 0.5, 0.5],  # noqa: B006 - kept for interface compatibility
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Smoke tests for the EfficientFormer image processor (PIL/NumPy/PyTorch inputs)."""

    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        # The processor must expose every configuration attribute we pass in.
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {
"""configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""],
"""tokenization_electra""": ["""ElectraTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ElectraForCausalLM""",
"""ElectraForMaskedLM""",
"""ElectraForMultipleChoice""",
"""ElectraForPreTraining""",
"""ElectraForQuestionAnswering""",
"""ElectraForSequenceClassification""",
"""ElectraForTokenClassification""",
"""ElectraModel""",
"""ElectraPreTrainedModel""",
"""load_tf_weights_in_electra""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFElectraForMaskedLM""",
"""TFElectraForMultipleChoice""",
"""TFElectraForPreTraining""",
"""TFElectraForQuestionAnswering""",
"""TFElectraForSequenceClassification""",
"""TFElectraForTokenClassification""",
"""TFElectraModel""",
"""TFElectraPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""FlaxElectraForCausalLM""",
"""FlaxElectraForMaskedLM""",
"""FlaxElectraForMultipleChoice""",
"""FlaxElectraForPreTraining""",
"""FlaxElectraForQuestionAnswering""",
"""FlaxElectraForSequenceClassification""",
"""FlaxElectraForTokenClassification""",
"""FlaxElectraModel""",
"""FlaxElectraPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( _a ,_a ,_a ,unittest.TestCase ):
    # NOTE(review): identifiers in this class are machine-mangled (`_A`, `_a`,
    # `__snake_case`, `__UpperCAmelCase`), and many locals are bound to the
    # throwaway name `a` while later lines read names such as `unet`/`vae` —
    # presumably the originals. Code is kept token-identical; only formatting
    # and comments were added. Confirm literal placeholders against the
    # original diffusers test file before repairing.
    """simple docstring"""

    # Pipeline under test and its parameter/batch-parameter sets.
    UpperCAmelCase : str = StableDiffusionInpaintPipeline
    UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCAmelCase : Union[str, Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCAmelCase : int = frozenset([] )

    def __snake_case ( self : Dict):
        # Builds tiny randomly-initialized components (UNet, scheduler, VAE,
        # CLIP text encoder + tokenizer) for a fast CPU test.
        torch.manual_seed(0)
        a : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
        a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase)
        torch.manual_seed(0)
        a : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        a : Any = CLIPTextModel(__UpperCAmelCase)
        a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0]
        a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64))
        a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
        # Seed the generator on-device, except on MPS where only CPU seeding works.
        if str(__UpperCAmelCase).startswith("mps"):
            a : Tuple = torch.manual_seed(__UpperCAmelCase)
        else:
            a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def __snake_case ( self : List[str]):
        # End-to-end fast test: run 2 inference steps on CPU and compare a
        # 3x3 corner slice of the output against a recorded reference.
        a : Union[str, Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a : Tuple = self.get_dummy_components()
        a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase)
        a : int = sd_pipe.to(__UpperCAmelCase)
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Any = self.get_dummy_inputs(__UpperCAmelCase)
        a : Optional[int] = sd_pipe(**__UpperCAmelCase).images
        a : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def __snake_case ( self : str):
        # Batched single-image inference must match unbatched output closely.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    # NOTE(review): identifiers here are machine-mangled (`_A`, `__snake_case`,
    # `__UpperCAmelCase`) and locals are bound to the throwaway name `a`.
    # Code is kept token-identical; only formatting and comments were added.
    """simple docstring"""

    def __snake_case ( self : Union[str, Any]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Dict):
        # Full-precision SD2 inpainting against a recorded reference image.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        a : Tuple = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase)
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Any = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : str = torch.manual_seed(0)
        a : Union[str, Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def __snake_case ( self : Any):
        # Half-precision (fp16) variant of the test above, with a looser tolerance.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Any = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Dict = torch.manual_seed(0)
        a : List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : Optional[Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def __snake_case ( self : int):
        # Memory test: with attention slicing + sequential CPU offload, peak
        # VRAM for a 2-step fp16 run must stay below ~2.65 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler")
        a : int = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Optional[int] = torch.manual_seed(0)
        a : str = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
        a : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 40
| 1
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """Acquiring an already-held lock with a timeout must raise `Timeout`
    only after at least *timeout* seconds have elapsed."""
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """Over-long lock filenames must be shortened to a valid basename (<= 255
    chars) while keeping the ".lock" suffix, and still lock correctly."""
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    # A second lock on the same (shortened) path must contend with the first.
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 40
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """Return True if *string* contains no repeated letters (case-insensitive).

    Raises:
        ValueError: if the string contains any non-alphabetic character.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
__lowercase = input("""Enter a string """).strip()
__lowercase = is_isogram(input_str)
print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 40
| 1
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
# Default filename under which scheduler configurations are serialized.
# Referenced below as the SchedulerMixin `config_name`.
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    """Enumeration of the schedulers that are mutually compatible for the
    standard (Karras-style) diffusion sampling loop; the values give a stable
    numeric identity to each scheduler class name."""

    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14
@dataclass
class SchedulerOutput(BaseOutput):
    """Base output class returned by a scheduler's step function.

    Attributes:
        prev_sample: the denoised sample computed for the previous timestep,
            to be fed back into the next step of the denoising loop.
    """

    prev_sample: torch.FloatTensor
class SchedulerMixin:
    """Base mixin giving schedulers config-backed save/load helpers and
    access to the set of interchangeable ("compatible") scheduler classes."""

    config_name = SCHEDULER_CONFIG_NAME
    # Class names of schedulers this one can be swapped with; filled by subclasses.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        """Instantiate the scheduler from a saved configuration (local path or hub repo)."""
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        """Serialize the scheduler configuration to *save_directory*."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes this instance can be swapped with."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names (plus this class) against the
        # top-level package, keeping only names that actually exist there.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
| 40
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class _A ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet dataset builder.

    NOTE(review): the three fields below all share the placeholder name
    ``UpperCAmelCase``, so only the last annotation survives -- presumably
    they were batch size / column subset / features; verify and restore.
    """
    UpperCAmelCase : int = 1_0_0_0_0
    UpperCAmelCase : Optional[List[str]] = None
    UpperCAmelCase : Optional[datasets.Features] = None
class _A ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that reads Parquet files.

    NOTE(review): an automated rename collapsed most locals onto ``a`` and
    parameters onto ``__UpperCAmelCase``; several names read below
    (``dl_manager``, ``data_files``, ``files``, ``splits``, ``pa_table``,
    ``parquet_file``, ``schema``) are never bound, so the methods raise
    ``NameError`` as written.  Restore the original names before use.
    """
    UpperCAmelCase : str = ParquetConfig
    def __snake_case ( self : Tuple):
        # DatasetInfo comes straight from the (optional) configured features.
        return datasets.DatasetInfo(features=self.config.features)
    def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
        """Build split generators from ``self.config.data_files``."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        a : str = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(__UpperCAmelCase , (str, list, tuple)):
            a : Dict = data_files
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        a : Dict = []
        for split_name, files in data_files.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(__UpperCAmelCase):
                    with open(__UpperCAmelCase , "rb") as f:
                        a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
                    break
            splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
        return splits
    def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
        """Cast a table to the configured features' schema (if any)."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
        return pa_table
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
        """Yield ``(key, pa.Table)`` pairs read batch-wise from each Parquet file."""
        a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            # Requested column subset must match the declared features exactly.
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
            with open(__UpperCAmelCase , "rb") as f:
                a : Tuple = pq.ParquetFile(__UpperCAmelCase)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        a : Optional[Any] = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
                    raise
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase ( A_ , A_ )-> list[list[int]]:
    """Entry point: collect all subsets of the first argument summing to the second.

    NOTE(review): both parameters are named ``A_`` -- duplicate parameter
    names are a ``SyntaxError``, so this function cannot be imported as-is.
    The body also returns ``result`` (never bound; every local rebinds ``a``)
    and calls ``create_state_space_tree``, which is not defined under that
    name in this file.  Restore the original names before use.
    """
    a : list[list[int]] = []
    a : list[int] = []
    a : List[str] = 0
    a : List[str] = sum(A_ )
    create_state_space_tree(A_ , A_ , A_ , A_ , A_ , A_ )
    return result
def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ , )-> None:
    """Recursive backtracking helper for the subset-sum search.

    NOTE(review): all six parameters share the name ``A_`` (``SyntaxError``),
    and the body reads ``max_sum``, ``remaining_nums_sum``, ``result``,
    ``path`` and ``nums`` -- the lost parameter names.  This shadows the
    entry-point function above, which is also named ``lowercase``.
    """
    # Prune: current path already too large, or remaining numbers cannot reach the target.
    if sum(A_ ) > max_sum or (remaining_nums_sum + sum(A_ )) < max_sum:
        return
    if sum(A_ ) == max_sum:
        result.append(A_ )
        return
    for index in range(A_ , len(A_ ) ):
        create_state_space_tree(
            A_ , A_ , index + 1 , [*path, nums[index]] , A_ , remaining_nums_sum - nums[index] , )
# Demo driver for the subset-sum search above.
# NOTE(review): ``generate_sum_of_subsets_soln``, ``nums``, ``max_sum`` and
# ``result`` are all undefined here -- the constants below were renamed to
# ``__lowercase`` by an automated pass.  Restore the original names to run.
__lowercase = [3, 34, 4, 12, 5, 2]
__lowercase = 9
__lowercase = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 40
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
# Map of canonical DPR checkpoint names to their hosted config URLs.
__lowercase = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}
class _A ( _a ):
    """Configuration class for DPR models (``model_type = \"dpr\"``).

    NOTE(review): ``__init__`` declares sixteen parameters all named
    ``__UpperCAmelCase`` -- duplicate parameter names are a ``SyntaxError``,
    so this class cannot be imported as-is.  The body then reads the lost
    original names (``vocab_size``, ``hidden_size``, ...), which would be
    ``NameError`` at call time.  Restore the original parameter names.
    """
    UpperCAmelCase : int = """dpr"""
    def __init__( self : List[Any] , __UpperCAmelCase : int=30522 , __UpperCAmelCase : Union[str, Any]=768 , __UpperCAmelCase : Dict=12 , __UpperCAmelCase : List[str]=12 , __UpperCAmelCase : Any=3072 , __UpperCAmelCase : Optional[int]="gelu" , __UpperCAmelCase : Any=0.1 , __UpperCAmelCase : Union[str, Any]=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : List[str]=2 , __UpperCAmelCase : Tuple=0.02 , __UpperCAmelCase : List[str]=1e-12 , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : str="absolute" , __UpperCAmelCase : int = 0 , **__UpperCAmelCase : Tuple , ):
        super().__init__(pad_token_id=__UpperCAmelCase , **__UpperCAmelCase)
        a : List[Any] = vocab_size
        a : Optional[Any] = hidden_size
        a : Union[str, Any] = num_hidden_layers
        a : Dict = num_attention_heads
        a : int = hidden_act
        a : Any = intermediate_size
        a : Any = hidden_dropout_prob
        a : Dict = attention_probs_dropout_prob
        a : Any = max_position_embeddings
        a : Union[str, Any] = type_vocab_size
        a : Optional[Any] = initializer_range
        a : Dict = layer_norm_eps
        a : int = projection_dim
        a : str = position_embedding_type
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _A :
    """Test helper that builds TransfoXL configs/inputs and checks TF model outputs.

    NOTE(review): an automated rename collapsed every instance attribute and
    local onto ``a``/``__UpperCAmelCase``; ``__init__`` never actually sets
    ``self.batch_size`` etc. (all assignments rebind ``a``), several methods
    declare duplicate ``__UpperCAmelCase`` parameters (a ``SyntaxError``),
    and bodies read the lost names (``parent``, ``model``, ``mems_a``, ...).
    The original attribute/parameter names must be restored before use.
    """
    def __init__( self : str , __UpperCAmelCase : Optional[Any] , ):
        a : List[str] = parent
        a : Optional[Any] = 13
        a : Dict = 7
        a : Dict = 30
        a : Optional[int] = self.seq_length + self.mem_len
        a : Optional[int] = 15
        a : Dict = True
        a : List[str] = True
        a : Union[str, Any] = 99
        a : Any = [10, 50, 80]
        a : Dict = 32
        a : Dict = 32
        a : Optional[int] = 4
        a : List[Any] = 8
        a : List[Any] = 128
        a : Optional[int] = 2
        a : List[Any] = 2
        a : Tuple = None
        a : str = 1
        a : Dict = 0
        a : Any = 3
        a : Any = self.vocab_size - 1
        a : Optional[int] = 0.01
    def __snake_case ( self : Union[str, Any]):
        # Build random input ids (and labels) plus a TransfoXLConfig.
        a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a : Any = None
        if self.use_labels:
            a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a : Tuple = TransfoXLConfig(
            vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
        return (config, input_ids_a, input_ids_a, lm_labels)
    def __snake_case ( self : List[str]):
        # Seed both Python and TF RNGs for reproducibility.
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : int):
        # Check base-model output shapes and the shape of the returned mems.
        a : List[str] = TFTransfoXLModel(__UpperCAmelCase)
        a , a : List[str] = model(__UpperCAmelCase).to_tuple()
        a : Optional[Any] = {"input_ids": input_ids_a, "mems": mems_a}
        a , a : Union[str, Any] = model(__UpperCAmelCase).to_tuple()
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : int , __UpperCAmelCase : Union[str, Any]):
        # Check LM-head logits/mems shapes across the three calling conventions.
        a : List[Any] = TFTransfoXLLMHeadModel(__UpperCAmelCase)
        a , a : Optional[Any] = model(__UpperCAmelCase).to_tuple()
        a : Optional[int] = {"input_ids": input_ids_a, "labels": lm_labels}
        a , a : Optional[int] = model(__UpperCAmelCase).to_tuple()
        a , a : Optional[int] = model([input_ids_a, mems_a]).to_tuple()
        a : Tuple = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
        a , a : List[str] = model(__UpperCAmelCase).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]):
        # Check the sequence-classification head's logits shape.
        a : Dict = TFTransfoXLForSequenceClassification(__UpperCAmelCase)
        a : int = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def __snake_case ( self : Union[str, Any]):
        # Adapt prepare_config_and_inputs() output to the common-test dict form.
        a : Optional[Any] = self.prepare_config_and_inputs()
        ((a) , (a) , (a) , (a)) : List[Any] = config_and_inputs
        a : List[str] = {"input_ids": input_ids_a}
        return config, inputs_dict
@require_tf
class _A ( _a ,_a ,unittest.TestCase ):
    """Common + pipeline test suite for the TF TransfoXL model family.

    NOTE(review): an automated rename collapsed locals onto ``a`` and
    parameters onto ``__UpperCAmelCase``; one method declares five duplicate
    ``__UpperCAmelCase`` parameters (a ``SyntaxError``), and several bodies
    read lost names (``pipeline_test_casse_name``, ``model``,
    ``list_other_models_with_output_ebd``, ``x``, ``name``, ``model_name``).
    Restore the original names before running.
    """
    UpperCAmelCase : Any = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    UpperCAmelCase : List[Any] = () if is_tf_available() else ()
    UpperCAmelCase : Tuple = (
        {
            """feature-extraction""": TFTransfoXLModel,
            """text-classification""": TFTransfoXLForSequenceClassification,
            """text-generation""": TFTransfoXLLMHeadModel,
            """zero-shot""": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    UpperCAmelCase : Any = False
    UpperCAmelCase : int = False
    UpperCAmelCase : Dict = False
    UpperCAmelCase : Any = False
    def __snake_case ( self : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]):
        # Skip text-generation pipeline tests for this architecture.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def __snake_case ( self : Any):
        # Set up the model tester and the shared config tester.
        a : Any = TFTransfoXLModelTester(self)
        a : Any = ConfigTester(self , config_class=__UpperCAmelCase , d_embed=37)
    def __snake_case ( self : int):
        self.config_tester.run_common_tests()
    def __snake_case ( self : int):
        self.model_tester.set_seed()
        a : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*__UpperCAmelCase)
    def __snake_case ( self : str):
        self.model_tester.set_seed()
        a : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*__UpperCAmelCase)
    def __snake_case ( self : Optional[int]):
        a : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__UpperCAmelCase)
    def __snake_case ( self : Tuple):
        # Verify input/output embedding types per model class.
        a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        a : Optional[Any] = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            a : Optional[int] = model_class(__UpperCAmelCase)
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                a : Optional[int] = model.get_output_embeddings()
                assert isinstance(__UpperCAmelCase , tf.keras.layers.Layer)
                a : List[str] = model.get_bias()
                assert name is None
            else:
                a : List[str] = model.get_output_embeddings()
                assert x is None
                a : Optional[int] = model.get_bias()
                assert name is None
    def __snake_case ( self : Optional[Any]):
        # TODO JP: Make TransfoXL XLA compliant
        pass
    @slow
    def __snake_case ( self : str):
        # Smoke-test loading the first published checkpoint.
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Tuple = TFTransfoXLModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)
    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def __snake_case ( self : Tuple):
        pass
@require_tf
class _A ( unittest.TestCase ):
    """Slow integration test: greedy generation with the wt103 TransfoXL checkpoint.

    NOTE(review): ``do_sample=__UpperCAmelCase`` passes an undefined name (the
    method has no parameters besides ``self``), and ``dtype=tf.intaa`` looks
    like a mangled ``tf.int32``/``tf.int64`` -- restore before un-skipping.
    """
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def __snake_case ( self : List[str]):
        a : int = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        a : Tuple = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa) # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        a : Union[str, Any] = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        a : Optional[int] = model.generate(__UpperCAmelCase , max_length=200 , do_sample=__UpperCAmelCase)
        self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCAmelCase)
| 40
|
"""simple docstring"""
class _A :
    """Binary-indexed-tree-like structure for range-maximum queries.

    NOTE(review): an automated rename broke this class: ``__init__`` rebinds
    ``a`` three times and never sets ``self.size``/``self.tree``/``self.arr``
    (read later); the two instance methods declare duplicate
    ``__UpperCAmelCase`` parameters (a ``SyntaxError``); and the bodies read
    the lost names (``index``, ``value``, ``left``, ``right``, ``result``,
    ``current_left_border``, ``current_left``).  Restore before use.
    """
    def __init__( self : int , __UpperCAmelCase : int):
        # Intended: self.size, self.arr (values), self.tree (max tree).
        a : Tuple = size
        a : Dict = [0] * size
        a : Optional[int] = [0] * size
    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # Next index covering this position (BIT "get_next").
        return index | (index + 1)
    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # End of the interval preceding this one (BIT "get_prev").
        return (index & (index + 1)) - 1
    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Point update: store ``value`` and propagate maxima upward.
        a : Union[str, Any] = value
        while index < self.size:
            a : Dict = self.get_prev(__UpperCAmelCase) + 1
            if current_left_border == index:
                a : Optional[int] = value
            else:
                a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.get_next(__UpperCAmelCase)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Range-maximum query over [left, right).
        right -= 1  # ``right`` is exclusive; make it inclusive
        a : List[str] = 0
        while left <= right:
            a : Dict = self.get_prev(__UpperCAmelCase)
            if left <= current_left:
                a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
                a : Optional[Any] = current_left
            else:
                a : List[str] = max(__UpperCAmelCase , self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    # Run the module's doctests (none are defined at present).
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Gate the real UnCLIP pipelines behind their optional dependencies
# (transformers >= 4.25.0 and torch); fall back to dummy placeholder
# objects when they are unavailable so imports never hard-fail.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 40
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
    """Unit tests for the 0/1 knapsack solver ``knapsack.knapsack``.

    Bug fix: the assertions previously passed the undefined name
    ``__UpperCAmelCase`` (a ``NameError`` at runtime) and every local was
    rebound to ``a``; the locals are restored from the assignment order.

    NOTE(review): all three test methods share the name ``__snake_case``, so
    only the last definition survives on the class -- they were presumably
    distinct ``test_*`` methods originally.  The names are left unchanged
    here to preserve the external interface.
    """
    def __snake_case ( self : List[Any]):
        # Degenerate inputs: zero capacity / zero-value item yield profit 0.
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
        # A valuable item still cannot fit into a zero-capacity sack.
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
    def __snake_case ( self : Optional[int]):
        # Small sack: best choice is the items of weight 2 and 1 (values 2 + 3 = 5).
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 5)
    def __snake_case ( self : Tuple):
        # Classic textbook instance: optimal value is 100 + 120 = 220.
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 220)
if __name__ == "__main__":
    unittest.main()
| 40
| 1
|
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def lowercase ( A_ , A_ )-> str:
    """Scrape the "Cited by" link text for a Google Scholar lookup result.

    NOTE(review): both parameters are named ``A_`` -- duplicate parameter
    names are a ``SyntaxError`` -- and the body reads ``soup``/``div``/
    ``anchors``, which are never bound (each assignment rebinds ``a``).
    Presumably the parameters were the base URL and its query params.
    """
    a : List[Any] = BeautifulSoup(requests.get(A_ , params=A_ ).content , "html.parser" )
    a : Tuple = soup.find("div" , attrs={"class": "gs_ri"} )
    a : Any = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    # The third anchor in the footer row is the "Cited by N" link.
    return anchors[2].get_text()
if __name__ == "__main__":
    # Example query parameters for a Scholar lookup.
    # NOTE(review): the dict below was renamed to ``__lowercase`` but is read
    # as ``params``, and ``get_citation`` is not defined in this file (the
    # scraper above is named ``lowercase``) -- restore before running.
    __lowercase = {
        """title""": (
            """Precisely geometry controlled microsupercapacitors for ultrahigh areal """
            """capacitance, volumetric capacitance, and energy density"""
        ),
        """journal""": """Chem. Mater.""",
        """volume""": 30,
        """pages""": """3979-3990""",
        """year""": 2018,
        """hl""": """en""",
    }
    print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 40
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """Tokenizer test suite for LayoutLM (slow + fast tokenizers).

    NOTE(review): several methods share the name ``__snake_case`` (only the
    last survives on the class), and ``setUp`` rebinds ``a`` twice while the
    write below reads the lost names ``vocab_tokens``/``self.vocab_file`` --
    restore the original names before relying on these tests.
    """
    UpperCAmelCase : str = LayoutLMTokenizer
    UpperCAmelCase : int = LayoutLMTokenizerFast
    UpperCAmelCase : Union[str, Any] = True
    UpperCAmelCase : Optional[Any] = True
    def __snake_case ( self : Optional[int]):
        super().setUp()
        # Minimal WordPiece vocabulary written to a temp dir for the tests.
        a : Tuple = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
        # Build a slow tokenizer from the temp vocab dir.
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
        # Input/expected-output pair used by the common tokenizer tests.
        a : Tuple = "UNwant\u00E9d,running"
        a : Dict = "unwanted, running"
        return input_text, output_text
    def __snake_case ( self : Any):
        # Full tokenization round-trip against the tiny vocab above.
        a : List[Any] = self.tokenizer_class(self.vocab_file)
        a : str = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9])
    def __snake_case ( self : Dict):
        pass
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the Falcon model subpackage: modeling classes
# are only registered when torch is available, and the module is replaced by
# a _LazyModule at runtime so heavy imports happen on first attribute access.
# NOTE(review): the structures below were renamed to ``__lowercase`` but the
# final _LazyModule call reads ``_import_structure`` -- restore the original
# name for this file to work.
__lowercase = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is present: expose the modeling symbols as well.
    __lowercase = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    # At runtime, defer all submodule imports via a lazy module proxy.
    import sys
    __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
def lowercase ( A_ )-> str:
    """Return the binary representation of an integer as a ``0b``-prefixed string.

    Negative numbers are rendered with a leading minus sign, e.g. ``-0b101``.

    :param A_: the integer to convert.
    :raises TypeError: if ``A_`` is a ``float`` or a ``str``.

    Bug fixes: the type guards previously read ``isinstance(A_, A_)`` (a
    value is trivially an instance of its own type expression was invalid),
    and the body used undefined names ``num``/``negative``/``binary``.  The
    bit list is now appended to and reversed instead of ``insert(0, ...)``,
    avoiding accidental O(n^2) behaviour.
    """
    if isinstance(A_ , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(A_ , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if A_ == 0:
        return "0b0"
    negative = A_ < 0
    num = -A_ if negative else A_
    bits: list[int] = []
    while num > 0:
        bits.append(num % 2 )  # least-significant bit first; reversed on join
        num >>= 1
    prefix = "-0b" if negative else "0b"
    return prefix + "".join(str(bit ) for bit in reversed(bits ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import re
from filelock import FileLock
# Detect nltk availability and pre-download the "punkt" sentence tokenizer
# under a file lock so concurrent workers do not race on the download.
# NOTE(review): the availability flag is assigned to ``__lowercase`` but read
# as ``NLTK_AVAILABLE`` -- the original constant name was lost; restore it.
try:
    import nltk
    __lowercase = True
except (ImportError, ModuleNotFoundError):
    __lowercase = False
if NLTK_AVAILABLE:
    with FileLock(""".lock""") as lock:
        nltk.download("""punkt""", quiet=True)
def lowercase ( A_ )-> str:
    """Split ``A_`` into sentences with nltk and join them with newlines.

    The Pegasus newline marker ``"<n>"`` is stripped before tokenization.

    Bug fix: the ``re.sub`` result was previously discarded, so the ``<n>``
    markers survived into the tokenized output; the substituted string is
    now the one passed to ``nltk.sent_tokenize``.

    :param A_: raw model output text.
    :raises AssertionError: if nltk is not installed.
    """
    a : str = re.sub("<n>" , "" , A_ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(a ) )
| 40
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]:
    """Fractional knapsack: maximal value for capacity ``w`` given values/weights.

    NOTE(review): all four parameters are named ``A_`` -- duplicate parameter
    names are a ``SyntaxError`` -- the sort key lambda reads an undefined
    ``x``, and the return expression reads the lost names ``k``, ``vl``,
    ``wt``, ``acc``, ``w``, ``n`` (each assignment rebinds ``a``).  Restore
    the original names (value list, weight list, capacity, item count).
    """
    # Sort items by value/weight ratio, descending; prefix-sum the weights,
    # then bisect for the last item that fits entirely.
    a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ )
    a , a : int = [i[0] for i in r], [i[1] for i in r]
    a : Union[str, Any] = list(accumulate(A_ ) )
    a : Optional[Any] = bisect(A_ , A_ )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
    # Run the module's doctests (none are defined at present).
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowercase = logging.get_logger(__name__)
class _A ( _a ):
"""simple docstring"""
UpperCAmelCase : Tuple = ["""pixel_values"""]
    def __init__( self : Optional[Any] , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Dict[str, int]] = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : Union[int, float] = 1 / 255 , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , **__UpperCAmelCase : Union[str, Any] , ):
        """Store resize/crop/rescale/normalize defaults on the processor.

        NOTE(review): the parameters all share the name ``__UpperCAmelCase``
        (duplicate parameter names are a ``SyntaxError``), the body reads the
        lost names (``size``, ``crop_size``, ``do_resize``, ...), and every
        assignment rebinds ``a`` instead of setting ``self.<attr>``.  Restore
        the original parameter/attribute names before use.
        """
        super().__init__(**__UpperCAmelCase)
        # Defaults: shortest edge 256 for resize, 224x224 center crop.
        a : Any = size if size is not None else {"shortest_edge": 256}
        a : Union[str, Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase)
        a : List[Any] = crop_size if crop_size is not None else {"height": 224, "width": 224}
        a : Dict = get_size_dict(__UpperCAmelCase)
        a : List[Any] = do_resize
        a : Dict = size
        a : str = resample
        a : Tuple = do_center_crop
        a : int = crop_size
        a : Dict = do_rescale
        a : List[str] = rescale_factor
        a : Optional[Any] = do_normalize
        a : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        a : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def __snake_case ( self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : List[str] , ):
        """Resize an image so its shortest edge matches ``size["shortest_edge"]``.

        NOTE(review): duplicate ``__UpperCAmelCase`` parameter names make this
        signature a ``SyntaxError``; the body reads the lost name ``size``.
        """
        a : Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        a : int = get_resize_output_image_size(__UpperCAmelCase , size=size["shortest_edge"] , default_to_square=__UpperCAmelCase)
        return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
    def __snake_case ( self : Dict , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Union[str, Any] , ):
        """Center-crop an image to ``(size["height"], size["width"])``.

        NOTE(review): duplicate ``__UpperCAmelCase`` parameter names make this
        signature a ``SyntaxError``; the body reads the lost name ``size``.
        """
        a : Dict = get_size_dict(__UpperCAmelCase)
        return center_crop(__UpperCAmelCase , size=(size["height"], size["width"]) , data_format=__UpperCAmelCase , **__UpperCAmelCase)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : float , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : List[str]):
        """Rescale pixel values by a scalar factor.

        NOTE(review): duplicate ``__UpperCAmelCase`` parameter names make this
        signature a ``SyntaxError``; restore the original names.
        """
        return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
    def __snake_case ( self : Any , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Union[float, List[float]] , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Optional[Any] , ):
        """Normalize an image with the given per-channel mean and std.

        NOTE(review): duplicate ``__UpperCAmelCase`` parameter names make this
        signature a ``SyntaxError``; restore the original names.
        """
        return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
def __snake_case ( self : List[Any] , __UpperCAmelCase : ImageInput , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[float] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[float, List[float]]] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCAmelCase : List[str] , ):
a : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
a : Any = size if size is not None else self.size
a : Dict = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase)
a : List[Any] = resample if resample is not None else self.resample
a : int = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Optional[Any] = get_size_dict(__UpperCAmelCase)
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
a : Any = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Tuple = make_list_of_images(__UpperCAmelCase)
if not valid_images(__UpperCAmelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True.")
# All transformations expect numpy arrays.
a : int = [to_numpy_array(__UpperCAmelCase) for image in images]
if do_resize:
a : Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase) for image in images]
if do_center_crop:
a : List[Any] = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase) for image in images]
if do_rescale:
a : List[Any] = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase) for image in images]
if do_normalize:
a : Union[str, Any] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase) for image in images]
a : Any = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase) for image in images]
a : List[Any] = {"pixel_values": images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase)
| 40
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """
    Resolve a force given in polar form into its Cartesian ``[x, y]`` components.

    Args:
        magnitude: Magnitude of the force.
        angle: Direction of the force (degrees by default).
        radian_mode: When True, interpret `angle` as radians.

    Returns:
        ``[magnitude * cos(angle), magnitude * sin(angle)]``.
    """
    # Fix: the mangled original declared three parameters all named `A_`
    # (a SyntaxError) under the name `lowercase`, which the second
    # `def lowercase` below shadowed; the __main__ block calls `polar_force`.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def in_static_equilibrium(forces: NDArray[floataa], location: NDArray[floataa], eps: float = 10**-1) -> bool:
    """
    Check whether a rigid body under 2-D `forces` applied at `location` points
    is in rotational static equilibrium (net moment about the origin ~ 0).

    Args:
        forces: Array of shape (n, 2) with one [fx, fy] force per row.
        location: Array of shape (n, 2) with the application point of each force.
        eps: Tolerance on the absolute value of the summed moments.
    """
    # Fix: the mangled original declared three parameters all named `A_`
    # (a SyntaxError); the __main__ block calls `in_static_equilibrium`.
    # Moment of each force about the origin: z-component of r x F.
    moments: NDArray[floataa] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # NOTE(review): every result below is bound to `__lowercase` while the
    # asserts read `forces` / `location` -- the identifiers look machine-mangled
    # and will raise NameError as written; restore `forces = ...` /
    # `location = ...` bindings. TODO confirm against the upstream file.
    # Test to check if it works
    __lowercase = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __lowercase = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    # Run the module's doctests as a final sanity check.
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowercase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A(BaseImageProcessor):
    r"""
    CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale and per-channel normalization.

    Fixes over the mangled original: the base class `_a` was undefined (the
    file imports `BaseImageProcessor`), method signatures repeated
    `__UpperCAmelCase` (a SyntaxError), and results were bound to `a` while the
    following statements read the real names.

    Args:
        do_resize: Whether to resize the shortest edge to ``size["shortest_edge"]``.
        size: Target size dict; defaults to ``{"shortest_edge": 224}``.
        resample: Resampling filter used when resizing.
        do_center_crop: Whether to center-crop to `crop_size`.
        crop_size: Crop size dict; defaults to ``{"height": 224, "width": 224}``.
        do_rescale: Whether to multiply pixel values by `rescale_factor`.
        rescale_factor: Scale factor; defaults to ``1 / 255``.
        do_normalize: Whether to normalize with `image_mean` / `image_std`.
        image_mean: Per-channel mean; defaults to the OpenAI CLIP mean.
        image_std: Per-channel std; defaults to the OpenAI CLIP std.
        do_convert_rgb: Whether to convert inputs to RGB before processing.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge equals ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here resolves to the module-level image transform.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values of `image` by the multiplicative factor `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` to ``(image - mean) / std`` per channel."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """
        Preprocess an image or batch of images; any `None` argument falls back
        to the value configured on the instance. Returns a `BatchFeature` with
        key ``"pixel_values"``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        return BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
| 40
|
"""simple docstring"""
def lowercase(mass: float, velocity: float) -> float:
    """
    Return the kinetic energy ``0.5 * mass * velocity**2`` of a moving body.

    Args:
        mass: Mass of the body (must be non-negative).
        velocity: Velocity of the body; the sign is irrelevant.

    Raises:
        ValueError: If `mass` is negative.
    """
    # Fix: the mangled original declared two parameters both named `A_`
    # (a SyntaxError) while the body read `mass`. The function name is kept
    # because no call site is visible in this file (upstream names it
    # `kinetic_energy` -- confirm before renaming).
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod(verbose=True)
| 40
| 1
|
"""simple docstring"""
def xnor_gate(input_a: int, input_b: int) -> int:
    """
    XNOR logic gate: return 1 when both inputs are equal, 0 otherwise.

    >>> xnor_gate(0, 0)
    1
    >>> xnor_gate(0, 1)
    0
    """
    # Fix: the mangled original declared duplicate `A_` parameters (a
    # SyntaxError) and compared `input_a == input_a` (always true); the
    # self-test and the __main__ block both call `xnor_gate`.
    return 1 if input_a == input_b else 0
def lowercase() -> None:
    """Self-test: assert the full XNOR truth table."""
    # NOTE(review): relies on a module-level `xnor_gate`; upstream names this
    # helper `test_xnor_gate` -- confirm before renaming. It is never invoked
    # by the __main__ block below.
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Print the full XNOR truth table.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 40
|
"""simple docstring"""
import os
import sys
import unittest
__lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowercase = os.path.join(git_repo_path, """src""", """diffusers""")
class _A(unittest.TestCase):
    """
    Unit tests for the `check_dummies` repo utility.

    Fixes over the mangled original: all four methods were named
    `__snake_case` (so they shadowed each other and unittest would not collect
    them -- test methods need a `test_` prefix), and locals were bound to `a`
    while the assertions read the real names (`objects`, `dummy_files`, ...).
    """

    def test_find_backend(self):
        """`find_backend` extracts the backend name from an availability-check line."""
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        """`read_init` maps each backend to its backend-specific object names."""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        """`create_dummy_object` renders dummy constants, functions and classes."""
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        """`create_dummy_files` renders one dummy module per backend."""
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example (8x8 board with a glider in the top-left corner).
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example (period-2 oscillator).
# Fix: both constants were assigned to `__lowercase` (the second clobbered the
# first) while the __main__ block reads `GLIDER`; restore the real names.
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """
    Compute one Conway's Game of Life step for the 0/1 grid `cells`.

    Args:
        cells: Rectangular grid where 1 is a live cell and 0 a dead cell.

    Returns:
        A new grid of the same shape with the next generation.
    """
    # Fix: the mangled original bound locals to `a` while the loop body read
    # `next_generation_row` / `neighbour_count` / `alive`, and was named
    # `lowercase` although `generate_images` calls `new_generation`.
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count the live neighbours of cell (i, j), clipping at the borders.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die; all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """
    Render `frames` successive generations of `cells` as greyscale PIL images.

    Live cells are drawn black (0) and dead cells white (255); the board is
    advanced with `new_generation` after each frame.
    """
    # Fix: the mangled original bound locals to `a` while reads used the real
    # names (`img`, `pixels`, `cells`), and the name `lowercase` shadowed the
    # previous def; the __main__ block calls `generate_images`.
    images = []
    for _ in range(frames):
        # Create output image for this generation: width = columns, height = rows.
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image.
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # NOTE(review): the result is bound to `__lowercase` but read back as
    # `images`, and `GLIDER` is never defined under that name above -- the
    # identifiers look machine-mangled; restore `images = generate_images(...)`
    # and the `GLIDER` constant. TODO confirm against the upstream file.
    __lowercase = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 40
|
"""simple docstring"""
# Mapping from package name to its pip requirement specifier (the dependency
# table that `setup.py` generates into `dependency_versions_table.py`).
# NOTE(review): upstream names this constant `deps` -- the `__lowercase`
# binding looks machine-mangled; confirm before relying on the name.
__lowercase = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
| 40
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class _A ( _a ):
    """
    Output class for the Shap-E pipelines: holds the generated renders as PIL
    images or numpy arrays.

    NOTE(review): the base class `_a` is not defined in this file (upstream
    subclasses `BaseOutput`) and the field name looks mangled (upstream calls
    it `images`) -- confirm before use.
    """

    UpperCAmelCase : Union[PIL.Image.Image, np.ndarray]
class _A ( _a ):
    """
    Shap-E image-to-3D pipeline: encodes a conditioning image with CLIP,
    denoises prior latents with a `PriorTransformer`, and decodes them into
    rendered views with a NeRF-style `ShapERenderer`.

    NOTE(review): this block appears machine-mangled and will not run as
    written -- method signatures repeat `__UpperCAmelCase` (a SyntaxError),
    and assignment targets are collapsed to `a` while later statements read
    the original identifiers (`latents`, `image_embeds`, `noise_pred`, ...).
    The comments below record apparent intent only; restore upstream names.
    """
    # Store the five sub-models on the pipeline via register_modules.
    def __init__( self : List[str] , __UpperCAmelCase : PriorTransformer , __UpperCAmelCase : CLIPVisionModel , __UpperCAmelCase : CLIPImageProcessor , __UpperCAmelCase : HeunDiscreteScheduler , __UpperCAmelCase : ShapERenderer , ):
        super().__init__()
        self.register_modules(
            prior=__UpperCAmelCase , image_encoder=__UpperCAmelCase , image_processor=__UpperCAmelCase , scheduler=__UpperCAmelCase , renderer=__UpperCAmelCase , )
    # Draw (or validate user-supplied) initial latents and scale them by the
    # scheduler's initial noise sigma.
    def __snake_case ( self : int , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int]):
        if latents is None:
            a : Any = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            # NOTE(review): reads `latents` although the branch above bound `a`.
            a : Union[str, Any] = latents.to(__UpperCAmelCase)
        a : Any = latents * scheduler.init_noise_sigma
        return latents
    # Sequentially offload the image encoder and prior to CPU via accelerate.
    def __snake_case ( self : Any , __UpperCAmelCase : str=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        a : Tuple = torch.device(f'''cuda:{gpu_id}''')
        a : Tuple = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__UpperCAmelCase , __UpperCAmelCase)
    # Resolve the device hooks placed by accelerate's offloading, falling back
    # to the pipeline's own device.
    @property
    def __snake_case ( self : int):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder , "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(__UpperCAmelCase , "_hf_hook")
                and hasattr(module._hf_hook , "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    # Encode the conditioning image(s) with CLIP; for classifier-free guidance,
    # prepend zeroed embeddings so one batched forward pass covers both paths.
    def __snake_case ( self : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : List[Any] , ):
        if isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(image[0] , torch.Tensor):
            a : List[Any] = torch.cat(__UpperCAmelCase , axis=0) if image[0].ndim == 4 else torch.stack(__UpperCAmelCase , axis=0)
        if not isinstance(__UpperCAmelCase , torch.Tensor):
            a : Any = self.image_processor(__UpperCAmelCase , return_tensors="pt").pixel_values[0].unsqueeze(0)
        a : Union[str, Any] = image.to(dtype=self.image_encoder.dtype , device=__UpperCAmelCase)
        a : List[Any] = self.image_encoder(__UpperCAmelCase)["last_hidden_state"]
        # Drop the CLS token; keep the patch embeddings.
        a : List[str] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        a : str = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0)
        if do_classifier_free_guidance:
            a : Dict = torch.zeros_like(__UpperCAmelCase)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            a : int = torch.cat([negative_image_embeds, image_embeds])
        return image_embeds
    # Full denoising loop: encode image, iterate the scheduler over the prior,
    # then decode each latent into rendered views.
    @torch.no_grad()
    @replace_example_docstring(__UpperCAmelCase)
    def __call__( self : Any , __UpperCAmelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 25 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : int = 64 , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ):
        if isinstance(__UpperCAmelCase , PIL.Image.Image):
            a : Tuple = 1
        elif isinstance(__UpperCAmelCase , torch.Tensor):
            a : Optional[Any] = image.shape[0]
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image)):
            a : int = len(__UpperCAmelCase)
        else:
            raise ValueError(
                f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(__UpperCAmelCase)}''')
        a : List[Any] = self._execution_device
        a : Optional[Any] = batch_size * num_images_per_prompt
        # Classifier-free guidance is active only when guidance_scale > 1.
        a : Union[str, Any] = guidance_scale > 1.0
        a : Optional[int] = self._encode_image(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        # prior
        self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase)
        a : str = self.scheduler.timesteps
        a : Optional[int] = self.prior.config.num_embeddings
        a : Optional[int] = self.prior.config.embedding_dim
        a : List[str] = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        a : List[str] = latents.reshape(latents.shape[0] , __UpperCAmelCase , __UpperCAmelCase)
        for i, t in enumerate(self.progress_bar(__UpperCAmelCase)):
            # expand the latents if we are doing classifier free guidance
            a : Union[str, Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            a : int = self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.prior(
                __UpperCAmelCase , timestep=__UpperCAmelCase , proj_embedding=__UpperCAmelCase , ).predicted_image_embedding
            # remove the variance
            # NOTE(review): `a , a = ...` keeps only the second element;
            # upstream unpacks `noise_pred, _ = noise_pred.split(...)`.
            a , a : Optional[int] = noise_pred.split(
                scaled_model_input.shape[2] , dim=2) # batch_size, num_embeddings, embedding_dim
            # NOTE(review): `do_classifier_free_guidance` is a bool, so
            # `is not None` is always True -- should be a plain truthiness test.
            if do_classifier_free_guidance is not None:
                a , a : Any = noise_pred.chunk(2)
                a : Tuple = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            a : Any = self.scheduler.step(
                __UpperCAmelCase , timestep=__UpperCAmelCase , sample=__UpperCAmelCase , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=__UpperCAmelCase)
        a : Optional[int] = []
        for i, latent in enumerate(__UpperCAmelCase):
            # NOTE(review): stray debug print -- remove.
            print()
            a : Tuple = self.renderer.decode(
                latent[None, :] , __UpperCAmelCase , size=__UpperCAmelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
            images.append(__UpperCAmelCase)
        a : List[str] = torch.stack(__UpperCAmelCase)
        if output_type not in ["np", "pil"]:
            raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''')
        a : Union[str, Any] = images.cpu().numpy()
        if output_type == "pil":
            a : int = [self.numpy_to_pil(__UpperCAmelCase) for image in images]
        # Offload last model to CPU
        if hasattr(self , "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=__UpperCAmelCase)
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
# Optional-dependency guards: each block registers one backend's symbol list
# only when that backend is importable; missing backends are silently skipped.
# NOTE(review): every symbol list below is bound to the same module-level name
# `__lowercase`, so each assignment clobbers the previous one; upstream these
# populate distinct keys of `_import_structure` (consumed by `_LazyModule` at
# the bottom) — confirm the intended targets before relying on this file.
try:
    # Fast (Rust-backed) tokenizer.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ["""BertTokenizerFast"""]
try:
    # PyTorch modeling backend.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BertForMaskedLM""",
        """BertForMultipleChoice""",
        """BertForNextSentencePrediction""",
        """BertForPreTraining""",
        """BertForQuestionAnswering""",
        """BertForSequenceClassification""",
        """BertForTokenClassification""",
        """BertLayer""",
        """BertLMHeadModel""",
        """BertModel""",
        """BertPreTrainedModel""",
        """load_tf_weights_in_bert""",
    ]
try:
    # TensorFlow modeling backend.
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFBertEmbeddings""",
        """TFBertForMaskedLM""",
        """TFBertForMultipleChoice""",
        """TFBertForNextSentencePrediction""",
        """TFBertForPreTraining""",
        """TFBertForQuestionAnswering""",
        """TFBertForSequenceClassification""",
        """TFBertForTokenClassification""",
        """TFBertLMHeadModel""",
        """TFBertMainLayer""",
        """TFBertModel""",
        """TFBertPreTrainedModel""",
    ]
try:
    # tensorflow-text in-graph tokenizer.
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ["""TFBertTokenizer"""]
try:
    # Flax/JAX modeling backend.
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """FlaxBertForCausalLM""",
        """FlaxBertForMaskedLM""",
        """FlaxBertForMultipleChoice""",
        """FlaxBertForNextSentencePrediction""",
        """FlaxBertForPreTraining""",
        """FlaxBertForQuestionAnswering""",
        """FlaxBertForSequenceClassification""",
        """FlaxBertForTokenClassification""",
        """FlaxBertModel""",
        """FlaxBertPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Static type-checkers see real imports; at runtime the lazy module in the
    # `else` branch is installed instead so heavy backends load on first use.
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )
    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is read here but never bound above under
    # that name (see the clobbered `__lowercase` assignments) — verify. Upstream
    # this line replaces sys.modules[__name__] with the lazy module.
    __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowercase(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a polar force into its Cartesian ``[x, y]`` components.

    Args:
        magnitude: Magnitude of the force.
        angle: Direction of the force; interpreted as degrees by default.
        radian_mode: When True, ``angle`` is already expressed in radians.

    Returns:
        Two-element list ``[fx, fy]``.
    """
    # Bug fix: the previous signature declared three parameters all named
    # ``A_`` (a SyntaxError) while the body referenced ``magnitude`` and
    # ``radian_mode``; coherent parameter names restored.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def lowercase(forces, location, eps: float = 10**-1) -> bool:
    """Return True when the net moment of ``forces`` applied at ``location``
    is (approximately) zero, i.e. the system is in static equilibrium.

    Args:
        forces: (n, 2) array of planar force vectors.
        location: (n, 2) array of the corresponding application points.
        eps: Absolute tolerance on the summed moment.

    Returns:
        True when ``|sum of moments| < eps``.
    """
    # Bug fix: the previous signature declared three parameters all named
    # ``A_`` (a SyntaxError) while the body referenced ``eps``; restored names.
    moments = cross(location, forces)  # z-component of each r x F
    total_moment: float = sum(moments)
    return abs(total_moment) < eps
if __name__ == "__main__":
    # Worked equilibrium problems used as self-checks.
    # NOTE(review): this script calls `polar_force` / `in_static_equilibrium`,
    # but the functions above are defined under the mangled name `lowercase`,
    # and both the forces and locations arrays are bound to the same name
    # `__lowercase` — confirm the intended names before running.
    # Test to check if it works
    __lowercase = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __lowercase = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 40
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A ( _a ):
    """Agent tool that answers a natural-language question about a document
    image using the Donut DocVQA checkpoint
    (``naver-clova-ix/donut-base-finetuned-docvqa``)."""

    UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa"""
    UpperCAmelCase : Tuple = (
        """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
        """should be the document containing the information, as well as a `question` that is the question about the """
        """document. It returns a text that contains the answer to the question."""
    )
    UpperCAmelCase : List[str] = """document_qa"""
    UpperCAmelCase : str = AutoProcessor
    UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel
    UpperCAmelCase : int = ["""image""", """text"""]
    UpperCAmelCase : int = ["""text"""]

    def __init__(self, *args, **kwargs):
        """Refuse to construct the tool when Pillow (needed to open the
        document image) is missing."""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    # NOTE(review): the three methods below were all mangled to the same name
    # ``__snake_case`` (so only the last survived) and bound every intermediate
    # to a throw-away ``a`` while later lines read the original names
    # (NameError); restored to the encode/forward/decode contract that
    # ``PipelineTool`` calls.
    def encode(self, document: "Image", question: str):
        """Build decoder prompt ids and pixel values for one (document, question) pair."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        # The prompt must not gain extra special tokens: generation continues it.
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run constrained greedy generation; returns the generated token ids."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            # Forbid the unknown token so the decoder stays inside the schema.
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Convert generated token ids into the plain-text answer."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # Bug fix: ``tokenajson`` is not a processor attribute — the Donut
        # processor API is ``token2json``.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 40
| 1
|
"""simple docstring"""
def lowercase(input_a: int, input_b: int) -> int:
    """Logical AND gate: return 1 only when both inputs are 1, else 0.

    Args:
        input_a: First binary input (0 or 1).
        input_b: Second binary input (0 or 1).

    Returns:
        ``1`` iff neither input is 0.
    """
    # Bug fix: both parameters were named ``A_`` (a SyntaxError) and the tuple
    # repeated the same input twice; the gate must inspect both inputs.
    return int((input_a, input_b).count(0) == 0)
def lowercase ( )-> None:
    """Exercise the AND gate on its full truth table.

    NOTE(review): this calls ``and_gate``, but the gate above is defined under
    the mangled name ``lowercase`` — confirm the intended function name.
    """
    assert and_gate(0 , 0 ) == 0
    assert and_gate(0 , 1 ) == 0
    assert and_gate(1 , 0 ) == 0
    assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
    # Run the truth-table self-test, then print every AND combination.
    # NOTE(review): `test_and_gate` / `and_gate` are not defined under these
    # names in this file (both functions above are named `lowercase`) — verify.
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
| 40
|
"""simple docstring"""
from __future__ import annotations
class _A :
    """Simple XOR stream cipher (XOR is its own inverse, so encrypt == decrypt).

    NOTE(review): the six methods were mangled to a single name
    ``__snake_case`` with duplicate parameter names (SyntaxErrors), the
    comprehensions XOR-ed ``ord(content)`` instead of each character, and the
    asserts compared a value against itself; restored to the conventional
    encrypt/decrypt API that the usage examples below rely on.
    """

    def __init__(self, key: int = 0):
        """Store a default key; 0 means "no default", calls must then pass one."""
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR every character of ``content`` with ``key``; return a char list."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of :meth:`encrypt`; ``content`` is the char list it returned."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-encrypt ``content`` and return the result as a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of :meth:`encrypt_string` (identical transform)."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt ``file`` line by line into ``encrypt.out``; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt ``file`` line by line into ``decrypt.out``; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 1
|
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _A ( unittest.TestCase ):
    """Regression test: an optimizer wrapped by ``accelerator.prepare`` must
    remain picklable."""

    # NOTE(review): unittest only collects methods named ``test_*``; this
    # mangled name will not be auto-run — confirm the intended method name.
    def __snake_case ( self : Optional[int]):
        # Bug fix: every intermediate was bound to a throw-away name ``a``
        # while later lines read ``model``/``accelerator``/``optimizer``
        # (NameError); coherent names restored.
        model = torch.nn.Linear(10 , 10)
        optimizer = torch.optim.SGD(model.parameters() , 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''')
        # Reset global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 40
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase(monkeypatch):
    """Reset the datasets deprecation-warning registry so every test sees a
    fresh set of emitted warnings.

    Bug fix: the parameter was mangled to ``A_`` while the body used
    ``monkeypatch`` (NameError); pytest also injects fixtures *by parameter
    name*, so the parameter must literally be ``monkeypatch``.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowercase(monkeypatch):
    """Replace ``datasets.inspect.huggingface_hub`` with a stub hub client
    that lists a fixed set of metrics.

    Bug fix: both helper classes were mangled to ``_A`` while the bodies
    referenced ``MetricMock``/``HfhMock`` (NameError), and the parameter was
    mangled away from ``monkeypatch``; coherent names restored.
    """
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        # Canned metric listing returned to `list_metrics` callers.
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowercase(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Every deprecated metric entry point must emit the evaluate-migration
    FutureWarning.

    Bug fix: all five parameters were mangled to ``A_`` (a SyntaxError) and
    the expected warning class was lost from ``pytest.warns``; restored.
    NOTE(review): the two fixtures above are defined under the mangled name
    ``lowercase``, not ``mock_emitted_deprecation_warnings``/``mock_hfh`` —
    confirm fixture names before running.
    """
    if "tmp_path" in args:
        # Substitute the real tmp_path fixture for its placeholder string.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class _A :
    """Helper that builds a tiny DistilBERT config plus random inputs for the
    TF model tests below.

    NOTE(review): the constructor binds every hyper-parameter to the same
    throw-away name ``a`` (and reads an undefined ``parent``), while the other
    methods read ``self.batch_size``/``self.vocab_size``/... — those
    attributes are never set here, so the class cannot run as written;
    upstream each assignment targets ``self.<name>``. Confirm before use.
    """

    def __init__( self : int , __UpperCAmelCase : Union[str, Any] , ):
        a : Optional[Any] = parent
        a : str = 13
        a : Optional[int] = 7
        a : Tuple = True
        a : str = True
        a : Tuple = False
        a : List[str] = True
        a : Dict = 99
        a : int = 32
        a : Any = 2
        a : int = 4
        a : Optional[Any] = 37
        a : int = "gelu"
        a : int = 0.1
        a : int = 0.1
        a : Optional[int] = 512
        a : int = 16
        a : Tuple = 2
        a : str = 0.02
        a : List[str] = 3
        a : Optional[Any] = 4
        a : List[Any] = None
    # Builds random ids/masks/labels plus a small DistilBertConfig.
    def __snake_case ( self : Optional[int]):
        a : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a : Any = None
        if self.use_input_mask:
            a : List[str] = random_attention_mask([self.batch_size, self.seq_length])
        a : List[Any] = None
        a : int = None
        a : Any = None
        if self.use_labels:
            a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            a : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
        a : Optional[int] = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Shape-checks the base model on dict and list inputs.
    def __snake_case ( self : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]):
        a : Optional[Any] = TFDistilBertModel(config=__UpperCAmelCase)
        a : Dict = {"input_ids": input_ids, "attention_mask": input_mask}
        a : List[Any] = model(__UpperCAmelCase)
        a : List[Any] = [input_ids, input_mask]
        a : Any = model(__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    # Shape-checks the masked-LM head.
    def __snake_case ( self : str , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : int):
        a : Optional[int] = TFDistilBertForMaskedLM(config=__UpperCAmelCase)
        a : int = {"input_ids": input_ids, "attention_mask": input_mask}
        a : int = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    # Shape-checks the span start/end logits of the QA head.
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str):
        a : Optional[int] = TFDistilBertForQuestionAnswering(config=__UpperCAmelCase)
        a : Union[str, Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        a : List[str] = model(__UpperCAmelCase)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    # Shape-checks the sequence-classification head.
    def __snake_case ( self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any]):
        a : int = self.num_labels
        a : Optional[int] = TFDistilBertForSequenceClassification(__UpperCAmelCase)
        a : Union[str, Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        a : Dict = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    # Shape-checks the multiple-choice head (inputs tiled per choice).
    def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]):
        a : str = self.num_choices
        a : Optional[Any] = TFDistilBertForMultipleChoice(__UpperCAmelCase)
        a : Any = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
        a : Union[str, Any] = tf.tile(tf.expand_dims(__UpperCAmelCase , 1) , (1, self.num_choices, 1))
        a : str = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        a : Tuple = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
    # Shape-checks the token-classification head.
    def __snake_case ( self : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : List[Any]):
        a : Tuple = self.num_labels
        a : Dict = TFDistilBertForTokenClassification(__UpperCAmelCase)
        a : int = {"input_ids": input_ids, "attention_mask": input_mask}
        a : Tuple = model(__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    # Repackages prepare_config_and_inputs() into (config, inputs_dict).
    def __snake_case ( self : Any):
        a : List[str] = self.prepare_config_and_inputs()
        ((a) , (a) , (a) , (a) , (a) , (a)) : Optional[Any] = config_and_inputs
        a : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _A ( _a ,_a ,unittest.TestCase ):
    """Common-mixin test suite for the TF DistilBERT model family.

    NOTE(review): the class attributes below (model list, pipeline mapping,
    two feature flags) are all mangled to the same ``UpperCAmelCase`` name and
    clobber each other, and ``TFDistilBertModelTester`` at the bottom is not
    defined under that name above (the tester class is mangled to ``_A``) —
    confirm the intended names before relying on this suite.
    """

    UpperCAmelCase : List[str] = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    UpperCAmelCase : Optional[int] = (
        {
            """feature-extraction""": TFDistilBertModel,
            """fill-mask""": TFDistilBertForMaskedLM,
            """question-answering""": TFDistilBertForQuestionAnswering,
            """text-classification""": TFDistilBertForSequenceClassification,
            """token-classification""": TFDistilBertForTokenClassification,
            """zero-shot""": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase : Optional[Any] = False
    UpperCAmelCase : Tuple = False
    # setUp: build the model tester and a ConfigTester for DistilBertConfig.
    def __snake_case ( self : str):
        a : int = TFDistilBertModelTester(self)
        a : Any = ConfigTester(self , config_class=__UpperCAmelCase , dim=37)
    def __snake_case ( self : Optional[Any]):
        self.config_tester.run_common_tests()
    # One test per task head; each delegates to the model tester above.
    def __snake_case ( self : Tuple):
        a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*__UpperCAmelCase)
    def __snake_case ( self : Optional[int]):
        a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCAmelCase)
    def __snake_case ( self : Tuple):
        a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCAmelCase)
    def __snake_case ( self : Union[str, Any]):
        a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCAmelCase)
    def __snake_case ( self : Tuple):
        a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCAmelCase)
    def __snake_case ( self : Optional[int]):
        a : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCAmelCase)
    # Smoke test: the first archive checkpoint must load from the hub.
    @slow
    def __snake_case ( self : Optional[Any]):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            a : Tuple = TFDistilBertModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)
@require_tf
class _A ( unittest.TestCase ):
    """Integration test: compares a 3x3 output slice of
    ``distilbert-base-uncased`` against stored reference values.

    NOTE(review): every intermediate is bound to the throw-away name ``a``
    while later lines read ``model``/``output`` — this cannot run as written;
    upstream each assignment targets the name that is subsequently used.
    """

    @slow
    def __snake_case ( self : Tuple):
        a : List[str] = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        a : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]])
        a : List[Any] = model(__UpperCAmelCase)[0]
        a : Union[str, Any] = [1, 6, 768]
        self.assertEqual(output.shape , __UpperCAmelCase)
        a : Optional[int] = tf.constant(
            [
                [
                    [0.19_261_885, -0.13_732_955, 0.4_119_799],
                    [0.22_150_156, -0.07_422_661, 0.39_037_204],
                    [0.22_756_018, -0.0_896_414, 0.3_701_467],
                ]
            ])
        # Loose tolerance: TF kernels differ slightly across hardware.
        tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1e-4)
| 40
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
__lowercase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next Game of Life generation for a rectangular 0/1 grid.

    Args:
        cells: Grid of cells; 1 = live, 0 = dead.

    Returns:
        A new grid of the same shape holding the next generation.
    """
    # Bug fix: the parameter was mangled to ``A_`` and both accumulators were
    # bound to a throw-away name while the loop appended to
    # ``next_generation_row``/``next_generation`` (NameError); names restored.
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count live neighbours, guarding every edge of the grid.
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def lowercase(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render ``frames`` successive generations as RGB PIL images.

    Each frame draws the *current* grid (live cells black, dead cells white),
    then advances one generation.

    Bug fix: both parameters were mangled to ``A_`` (a SyntaxError) and every
    intermediate was bound to a throw-away name while the loop read
    ``img``/``pixels``/``colour``/``images`` (NameError); names restored.
    NOTE(review): ``new_generation`` below is defined above under the mangled
    name ``lowercase`` — confirm the intended function name.
    """
    images = []
    for _ in range(frames):
        # Create output image (width = columns, height = rows).
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image: 1 -> 0 (black), 0 -> 255 (white).
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    # Render 16 generations of the glider and write an animated GIF.
    # NOTE(review): `generate_images`/`GLIDER`/`images` are not defined under
    # these names above (the generator is `lowercase`, the pattern and the
    # result are both `__lowercase`) — verify the intended names.
    __lowercase = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 40
| 1
|
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
# Module logger (named after this module).
__lowercase = datasets.utils.logging.get_logger(__name__)
class _A ( folder_based_builder.FolderBasedBuilderConfig ):
    """Builder configuration for the AudioFolder loader.

    NOTE(review): both flags below are mangled to the same attribute name, so
    the second clobbers the first; upstream this config declares
    ``drop_labels`` and ``drop_metadata`` — confirm the intended names.
    """

    UpperCAmelCase : bool = None
    UpperCAmelCase : bool = None
class _A ( folder_based_builder.FolderBasedBuilder ):
    """Folder-based dataset builder for audio files (one example per file),
    wired to the AudioClassification task template.

    NOTE(review): all five class attributes are mangled to the same name and
    clobber each other, and ``AudioFolderConfig`` is not defined above under
    that name (the config class is mangled to ``_A``) — confirm before use.
    """

    UpperCAmelCase : Any = datasets.Audio()
    UpperCAmelCase : str = """audio"""
    UpperCAmelCase : Optional[int] = AudioFolderConfig
    UpperCAmelCase : List[str]  # definition at the bottom of the script
    UpperCAmelCase : str = AudioClassification(audio_column="""audio""" ,label_column="""label""" )
# File extensions recognized by the audio folder loader.
# NOTE(review): the list and its alias on the last line are both bound to
# `__lowercase`, and `AUDIO_EXTENSIONS` is not defined under that name above —
# upstream the list itself is named AUDIO_EXTENSIONS. Confirm before use.
__lowercase = [
    """.aiff""",
    """.au""",
    """.avr""",
    """.caf""",
    """.flac""",
    """.htk""",
    """.svx""",
    """.mat4""",
    """.mat5""",
    """.mpc2k""",
    """.ogg""",
    """.paf""",
    """.pvf""",
    """.raw""",
    """.rf64""",
    """.sd2""",
    """.sds""",
    """.ircam""",
    """.voc""",
    """.w64""",
    """.wav""",
    """.nist""",
    """.wavex""",
    """.wve""",
    """.xi""",
    """.mp3""",
    """.opus""",
]
__lowercase = AUDIO_EXTENSIONS
| 40
|
"""simple docstring"""
from itertools import permutations
def lowercase(num) -> bool:
    """Return True if the 0-9 pandigital digit tuple ``num`` has the Project
    Euler 43 substring-divisibility property.

    With digits d1..d10, the three-digit windows d2d3d4, d3d4d5, ..., d8d9d10
    must be divisible by 2, 3, 5, 7, 11, 13, 17 respectively.

    Bug fix: the parameter was mangled to ``A_`` while the body indexed
    ``num`` (NameError at call time); the name is restored.
    """
    # d2d3d4 divisible by 2  <=>  d4 is even
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3  <=>  its digit sum is divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5  <=>  d6 is 0 or 5
    if num[5] % 5 != 0:
        return False
    # Remaining windows checked directly against 7, 11, 13, 17.
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def lowercase ( A_ = 10 )-> int:
    """Sum all pandigital numbers over digits ``0..A_-1`` that satisfy the
    substring-divisibility property (Project Euler 43; the full problem uses
    the default ``A_ = 10``).

    Bug fix: ``map`` and the filter were handed the mangled parameter instead
    of each permutation; restored ``map(str, num)`` and
    ``is_substring_divisible(num)``.
    NOTE(review): ``is_substring_divisible`` is defined above under the
    mangled name ``lowercase`` — confirm the intended name before running.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(A_))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
    # NOTE(review): `solution` is defined above under the mangled name
    # `lowercase` — verify the intended name before running.
    print(f'''{solution() = }''')
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Base import structure for the RAG subpackage (always-available modules).
# NOTE(review): the dict and both backend symbol lists below are all bound to
# the same name `__lowercase` and clobber each other; upstream they populate
# `_import_structure`, which `_LazyModule` consumes at the bottom — confirm.
__lowercase = {
    """configuration_rag""": ["""RagConfig"""],
    """retrieval_rag""": ["""RagRetriever"""],
    """tokenization_rag""": ["""RagTokenizer"""],
}
try:
    # PyTorch modeling backend.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """RagModel""",
        """RagPreTrainedModel""",
        """RagSequenceForGeneration""",
        """RagTokenForGeneration""",
    ]
try:
    # TensorFlow modeling backend.
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """TFRagModel""",
        """TFRagPreTrainedModel""",
        """TFRagSequenceForGeneration""",
        """TFRagTokenForGeneration""",
    ]
if TYPE_CHECKING:
    # Static type-checkers see real imports; at runtime the lazy module in the
    # `else` branch defers heavy backend imports until first attribute access.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys
    # NOTE(review): `_import_structure` is read here but never bound above
    # under that name — see the clobbered `__lowercase` assignments.
    __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make cuDNN/cuBLAS deterministic so pixel comparisons are reproducible.
enable_full_determinism()
class _A ( _a ,unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for KandinskyVaaControlnetPipeline.

    NOTE(review): the methods below are all mangled to the same name
    ``__snake_case`` (properties and builders clobber each other), and most
    intermediates are bound to a throw-away ``a`` while later lines read the
    original names (e.g. ``model``, ``unet``, ``pipe``) — this cannot run as
    written; upstream each assignment targets the name subsequently used.
    """

    UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline
    UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Dict = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    UpperCAmelCase : Optional[int] = False
    # Small dimension properties used to size the dummy models below.
    @property
    def __snake_case ( self : Optional[Any]):
        return 32
    @property
    def __snake_case ( self : Dict):
        return 32
    @property
    def __snake_case ( self : Dict):
        return self.time_input_dim
    @property
    def __snake_case ( self : Any):
        return self.time_input_dim * 4
    @property
    def __snake_case ( self : str):
        return 100
    # Builds a tiny hint-conditioned UNet (seeded for determinism).
    @property
    def __snake_case ( self : str):
        torch.manual_seed(0)
        a : str = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        a : Dict = UNetaDConditionModel(**__UpperCAmelCase)
        return model
    # Constructor kwargs for the tiny movq (VQ decoder) model.
    @property
    def __snake_case ( self : str):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def __snake_case ( self : Union[str, Any]):
        torch.manual_seed(0)
        a : Dict = VQModel(**self.dummy_movq_kwargs)
        return model
    # Assembles {unet, scheduler, movq} for the pipeline under test.
    def __snake_case ( self : Optional[Any]):
        a : Optional[Any] = self.dummy_unet
        a : int = self.dummy_movq
        a : str = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , )
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    # Builds seeded random call kwargs (embeds, hint image, generator).
    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0):
        a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            __UpperCAmelCase)
        # create hint
        a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        # MPS does not support device-local generators; seed on CPU there.
        if str(__UpperCAmelCase).startswith("mps"):
            a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase)
        else:
            a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : str = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    # End-to-end fast test: a 2-step run must reproduce a stored pixel slice.
    def __snake_case ( self : Dict):
        a : str = "cpu"
        a : Tuple = self.get_dummy_components()
        a : Dict = self.pipeline_class(**__UpperCAmelCase)
        a : Optional[int] = pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase))
        a : Any = output.images
        a : Any = pipe(
            **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0]
        a : Union[str, Any] = image[0, -3:, -3:, -1]
        a : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : Tuple = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """GPU integration test: runs the full prior + controlnet pipelines and
    compares the output image against a stored reference.

    NOTE(review): most intermediates are bound to a throw-away name ``a``
    while later lines read ``hint``/``pipe_prior``/``pipeline``/... — this
    cannot run as written; upstream each assignment targets the name
    subsequently used. The first method also appears to be a mangled
    ``tearDown`` (it calls ``super().tearDown()``) — confirm.
    """

    def __snake_case ( self : Optional[int]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : List[str]):
        # Reference output and depth-hint input fetched from the hub.
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        a : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # Normalize the hint to [0, 1] and reorder to NCHW.
        a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0
        a : str = hint.permute(2 , 0 , 1).unsqueeze(0)
        a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(__UpperCAmelCase)
        a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        a : int = pipeline.to(__UpperCAmelCase)
        pipeline.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Tuple = "A robot, 4k photo"
        a : Any = torch.Generator(device="cuda").manual_seed(0)
        a , a : int = pipe_prior(
            __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        a : str = torch.Generator(device="cuda").manual_seed(0)
        a : Union[str, Any] = pipeline(
            image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , )
        a : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
| 40
| 1
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _A ( SchedulerCommonTest ):
    """Unit tests for ``EulerDiscreteScheduler``: config sweeps plus full seeded
    denoising loops whose output statistics are pinned to reference values.

    Fixes vs. the mangled original: base class `_a` was undefined (the import at
    the top of the file provides ``SchedulerCommonTest``); both class attributes
    were named ``UpperCAmelCase`` while the methods read
    ``self.scheduler_classes`` / ``self.num_inference_steps``; all seven methods
    were named ``__snake_case`` (each shadowing the previous) while the bodies
    call ``self.get_scheduler_config``; loop/locals were bound to ``a``.
    """

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler kwargs, overridable via ``kwargs``."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001], [0.0_002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0_807) < 1e-2
        assert abs(result_mean.item() - 0.0_131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0_002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        # init_noise_sigma may live on the accelerator; bring it to CPU first.
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0_807) < 1e-2
        assert abs(result_mean.item() - 0.0_131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52_299_499_511_719) < 1e-2
        assert abs(result_mean.item() - 0.16_213_932_633_399_963) < 1e-3
| 40
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an ``ASTConfig`` for the given checkpoint name.

    Fixes vs. the mangled original: the def was renamed to ``lowercase`` while the
    driver (L3415 in this file) calls ``get_audio_spectrogram_transformer_config``;
    ``config``/``idalabel`` were referenced but never defined; the config-attribute
    assignments were lost; and ``{int(A_): v for k, v in ...}`` keyed every entry
    with the function argument instead of ``int(k)``.
    """
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    """Map one original AST checkpoint parameter name to its HF Transformers name.

    Fix vs. the mangled original: every ``name.replace`` result was discarded into
    ``a``, so the function returned ``name`` unchanged (and referenced it before
    definition). Renamed back from ``lowercase`` to the name the converter expects.
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original AST state dict in place into HF key layout.

    Fused ``qkv`` projections are split into separate query/key/value tensors;
    all other keys are renamed via ``rename_key``.

    Fixes vs. the mangled original: ``def lowercase(A_, A_)`` was a SyntaxError
    (duplicate parameter); the dict-store targets of the q/k/v slices were lost
    (bound to ``a``); the driver (L3448) calls this as ``convert_state_dict``.
    NOTE(review): the ``rename_key`` helper is the renamer defined just above in
    this file (also mangled to ``lowercase`` upstream of this fix) — reconcile.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            # Keys look like "module.v.blocks.<N>.attn.qkv.{weight,bias}".
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention."
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original classification-head keys from ``state_dict`` in place.

    Fix vs. the mangled original: ``state_dict.pop(A_, A_)`` popped the function
    argument itself instead of each ignored key (and ``state_dict`` was not even
    the parameter name); the driver (L3446) calls this as ``remove_keys``.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        # Default of None makes the removal tolerant of already-missing keys.
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to HF format, verify logits on a sample,
    then optionally save to disk and/or push to the hub.

    Fixes vs. the mangled original: ``def lowercase(A_, A_, A_=False)`` was a
    SyntaxError (duplicate parameters); every intermediate was bound to ``a`` so
    ``checkpoint_url``/``state_dict``/``model``/``logits`` etc. were undefined;
    the ``__main__`` block (L3518) calls this function by this name.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
    std = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
    max_length = 1_024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving feature extractor to {pytorch_dump_folder_path}''')
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f'''MIT/{model_name}''')
        feature_extractor.push_to_hub(f'''MIT/{model_name}''')
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
| 1
|
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__lowercase = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (raw model, accelerator-prepared model, prepared dataloader) on a
    deterministic regression dataset.

    Fixes vs. the mangled original: ``def lowercase(A_, A_=82, A_=16)`` was a
    SyntaxError (duplicate parameters) and all locals were bound to ``a``; the
    test driver (L3588) calls this as ``get_basic_setup``.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader used by the MRPC test.

    Fixes vs. the mangled original: locals were bound to ``a`` (so ``tokenizer``,
    ``outputs`` and ``tokenized_datasets`` were undefined) and the tokenize/pad
    keyword arguments were mangled; L3566 calls this as ``get_dataloader``.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        # Tokenize sentence pairs; padding is deferred to the collate function.
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return {"ddp": prepared model/dataloader, "no": baseline} plus the accelerator.

    Fixes vs. the mangled original: ``def lowercase(A_, A_)`` was a SyntaxError
    (duplicate parameter) and locals were bound to ``a``; L3596 calls this as
    ``get_mrpc_setup``.
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run ``model`` over ``dataloader`` and return gathered (logits, targets).

    Fixes vs. the mangled original: ``def lowercase(A_, A_, A_)`` was a
    SyntaxError (duplicate parameters); per-batch results were bound to ``a`` so
    ``logit``/``target``/``logits``/``targs`` were undefined; L3589 calls this as
    ``generate_predictions``.
    """
    logits_and_targets = []
    for batch in dataloader:
        # Each batch is a two-entry mapping: (inputs, targets) in insertion order.
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check ``gather_for_metrics`` returns exactly ``num_samples`` predictions
    (i.e. duplicate samples added for divisibility are dropped).

    Fixes vs. the mangled original: duplicate ``A_`` parameters (SyntaxError) and
    mangled locals; ``main`` (L3648) calls this as ``test_torch_metrics``.
    """
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'''
def test_mrpc(dispatch_batches=False, split_batches=False):
    """Compare MRPC metrics computed on a single process against the distributed
    path using ``gather_for_metrics``; they must match exactly.

    Fixes vs. the mangled original: duplicate ``A_`` parameters (SyntaxError)
    and locals bound to ``a``; ``main`` (L3639) calls this as ``test_mrpc``.
    """
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    """Drive the metric-gathering tests across split/dispatch configurations.

    Fixes vs. the mangled original: the def was renamed ``lowercase`` while the
    ``__main__`` guard and ``_mp_fn`` call ``main()``; the initial Accelerator
    received undefined ``A_`` arguments; loop variables were bound to ``a``.
    NOTE(review): upstream initializes with ``dispatch_batches=None`` — using
    that here; confirm against the accelerate version in use.
    """
    accelerator = Accelerator(split_batches=False, dispatch_batches=None)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Entry point for TPU/XLA process spawning; the process index is unused.

    NOTE(review): renamed from the mangled ``lowercase`` — accelerate's
    ``xla_spawn`` looks this function up by the name ``_mp_fn``; confirm.
    """
    main()


if __name__ == "__main__":
    main()
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import machinery for the RAG subpackage: heavy torch/TF modules are only
# imported on first attribute access.
# Fixes vs. the mangled original: the optional modeling lists were bound to a
# throwaway ``__lowercase`` instead of extending ``_import_structure``; the
# ``_LazyModule`` at the bottom referenced a never-defined ``_import_structure``
# and was never installed into ``sys.modules``.
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Fixes vs. the mangled original: all four module constants were bound to the
# same ``__lowercase`` name (each shadowing the previous), while the tokenizer
# class below reads ``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character map for byte-level BPE.

    Printable bytes map to themselves; the remaining bytes map to code points
    starting at 256 so every byte has a visible, reversible representative.
    Fixes vs. the mangled original: ``cs``/``n`` were bound to ``a`` and thus
    undefined; renamed from ``lowercase`` — the tokenizer (L3792) calls
    ``bytes_to_unicode()``.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a sequence of symbols).

    Fixes vs. the mangled original: ``pairs`` was bound to ``a`` and therefore
    undefined at the ``add``/``return`` sites; renamed from ``lowercase`` — the
    tokenizer's ``bpe`` method (L3812/L3842) calls ``get_pairs``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class _A ( PreTrainedTokenizer ):
    """Byte-level BPE tokenizer for Blenderbot (GPT-2/RoBERTa style).

    Fixes vs. the mangled original: base class `_a` was undefined (the file
    imports ``PreTrainedTokenizer``); every method was named ``__snake_case``
    while the bodies call ``self.bpe``; ``__init__``/``save_vocabulary``/mask
    methods had duplicate ``__UpperCAmelCase`` parameters (SyntaxError); the BPE
    cache store was lost; and the merge-sort key lambda referenced ``kv`` while
    its parameter was mangled.
    NOTE(review): class kept as ``_A`` (upstream ``BlenderbotTokenizer``) —
    reconcile with external callers.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the ranked BPE merges to a single pre-tokenized token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` with the GPT-2 regex, byte-encode each piece, then BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        # Blenderbot (like RoBERTa) does not use token type ids: always zeros.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Blenderbot only appends EOS; a second sequence is not supported here.
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''')
        return input_ids
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( _a ,_a ,_a ,unittest.TestCase ):
    """Fast (tiny-component, CPU) tests for ``StableDiffusionInpaintPipeline``.

    NOTE(review): the mangling collapsed every class attribute into the single
    name ``UpperCAmelCase`` (later assignments shadow earlier ones), every
    local into ``a``, and many call arguments into the undefined name
    ``__UpperCAmelCase``; later reads such as ``unet``/``sd_pipe``/``image``
    are therefore unbound. Names must be restored before these tests can run.
    """
    # Originally distinct tester attributes: pipeline class, params,
    # batch params, and two image-param frozensets — TODO confirm names.
    UpperCAmelCase : str = StableDiffusionInpaintPipeline
    UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCAmelCase : Union[str, Any] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCAmelCase : int = frozenset([] )
    def __snake_case ( self : Dict):
        """Build tiny unet/scheduler/vae/text-encoder/tokenizer components for fast tests."""
        torch.manual_seed(0)
        # in_channels=9: 4 latent + 4 masked-image latent + 1 mask channel (inpainting UNet).
        a : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
        a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase)
        torch.manual_seed(0)
        a : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        a : Any = CLIPTextModel(__UpperCAmelCase)
        a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        # Safety checker and feature extractor are intentionally disabled for speed.
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0):
        """Build deterministic call kwargs (PIL init/mask images, seeded generator)."""
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0]
        # The mask image is a shifted copy of the init image, resized to 64x64.
        a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64))
        a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
        # MPS lacks per-device generators, so fall back to the global seed there.
        if str(__UpperCAmelCase).startswith("mps"):
            a : Tuple = torch.manual_seed(__UpperCAmelCase)
        else:
            a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def __snake_case ( self : List[str]):
        """Run a 2-step CPU inference and compare a 3x3 corner slice to a golden value."""
        a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
        a : Tuple = self.get_dummy_components()
        a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase)
        a : int = sd_pipe.to(__UpperCAmelCase)
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Any = self.get_dummy_inputs(__UpperCAmelCase)
        a : Optional[int] = sd_pipe(**__UpperCAmelCase).images
        a : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        # Golden slice recorded for this exact tiny-component configuration.
        a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def __snake_case ( self : str):
        """Loosen the batched-vs-single tolerance for this pipeline's mixin test."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow GPU integration tests for ``stabilityai/stable-diffusion-2-inpainting``.

    NOTE(review): method names were mangled to ``__snake_case`` (the first
    method's ``super().tearDown()`` call shows it was the ``tearDown`` hook)
    and locals collapsed into ``a``, so later reads of ``pipe``/``init_image``/
    ``expected_image`` are unbound — restore names before running.
    """
    def __snake_case ( self : Union[str, Any]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : Dict):
        """Full-precision end-to-end inpainting compared to a stored golden .npy image."""
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        a : Tuple = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase)
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        # Attention slicing trades speed for lower peak VRAM.
        pipe.enable_attention_slicing()
        a : Any = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : str = torch.manual_seed(0)
        a : Union[str, Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def __snake_case ( self : Any):
        """fp16 end-to-end inpainting vs. an fp16 golden image (looser tolerance)."""
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Any = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Dict = torch.manual_seed(0)
        a : List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : Optional[Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        # fp16 accumulates more numerical error, hence the looser bound.
        assert np.abs(expected_image - image).max() < 5e-1
    def __snake_case ( self : int):
        """Check peak VRAM stays under 2.65 GB with fp16, max attention slicing and sequential CPU offload."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler")
        a : int = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        # slice size 1 = most aggressive memory saving.
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Optional[int] = torch.manual_seed(0)
        a : str = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
        a : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 40
| 1
|
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__lowercase = logging.getLogger(__name__)
def lowercase ( A_ , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = False , )-> Optional[int]:
    """Quantize a model with bitsandbytes and dispatch it across devices.

    Reconstructed contract (inferred from the body's reads — TODO confirm):
    positional parameters are (model, bnb_quantization_config); the keyword
    parameters appear to be weights_location, device_map,
    no_split_module_classes, max_memory, offload_folder, offload_state_dict.

    NOTE(review): the signature declares every parameter as ``A_`` (a
    duplicate-argument SyntaxError) and every local was collapsed into ``a``,
    so later reads such as ``load_in_abit``, ``modules_to_not_convert`` and
    ``model_device`` are unbound. The code is left byte-identical; names must
    be restored before this can run.
    """
    a : Tuple = bnb_quantization_config.load_in_abit
    # NOTE(review): the two attribute reads above/below were presumably the
    # distinct load_in_8bit / load_in_4bit flags before mangling — confirm.
    a : Any = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed." )
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed." )
    a : str = []
    # custom device map
    if isinstance(A_ , A_ ) and len(device_map.keys() ) > 1:
        a : str = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        a : Tuple = get_keys_to_not_convert(A_ )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(A_ )
    a : List[Any] = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        a : Union[str, Any] = []
    a : Any = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(A_ )
    # compatibility with peft
    a : List[Any] = load_in_abit
    a : Tuple = load_in_abit
    a : Optional[int] = get_parameter_device(A_ )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager." )
        a : Union[str, Any] = replace_with_bnb_layers(A_ , A_ , modules_to_not_convert=A_ )
        # convert param to the right dtype
        a : List[str] = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
                param.to(torch.floataa )
                if param.dtype != torch.floataa:
                    # Strip the tensor suffix to address the owning module by name.
                    a : Dict = name.replace(".weight" , "" ).replace(".bias" , "" )
                    a : Optional[int] = getattr(A_ , A_ , A_ )
                    if param is not None:
                        param.to(torch.floataa )
            elif torch.is_floating_point(A_ ):
                param.to(A_ )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info(
            F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            "We move the model to cuda." )
        return model
    elif weights_location is None:
        raise RuntimeError(
            F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
    else:
        # Meta-device model: build empty quantized skeleton, plan the device
        # map, then load the checkpoint directly into the quantized layout.
        with init_empty_weights():
            a : Optional[Any] = replace_with_bnb_layers(
                A_ , A_ , modules_to_not_convert=A_ )
        a : Dict = get_quantized_model_device_map(
            A_ , A_ , A_ , max_memory=A_ , no_split_module_classes=A_ , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            a : Dict = True
        a : List[Any] = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
        load_checkpoint_in_model(
            A_ , A_ , A_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=A_ , offload_state_dict=A_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
        return dispatch_model(A_ , device_map=A_ , offload_dir=A_ )
def lowercase ( A_ , A_ , A_=None , A_=None , A_=None )-> Any:
    """Compute a device map for a quantized model (or validate a provided one).

    Reconstructed contract (inferred from body reads — TODO confirm):
    parameters are (model, bnb_quantization_config, device_map=None,
    max_memory=None, no_split_module_classes=None). String device maps
    ("auto"/"balanced"/...) are expanded via accelerate's memory planner;
    dict device maps are checked so no quantized module lands on cpu/disk.

    NOTE(review): parameters were mangled to the duplicate name ``A_``
    (SyntaxError) and locals collapsed to ``a`` — later reads such as
    ``special_dtypes``/``kwargs``/``device_map_without_some_modules`` are
    unbound. Code left byte-identical.
    """
    if device_map is None:
        if torch.cuda.is_available():
            # Default: put the whole model on the current GPU.
            a : Dict = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization." )
        logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
    if isinstance(A_ , A_ ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'." )
        a : List[Any] = {}
        # Skipped modules keep the configured torch_dtype; fp32-kept modules stay float32.
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
            } )
        a : Any = {}
        a : Union[str, Any] = special_dtypes
        a : Optional[Any] = no_split_module_classes
        a : List[str] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            a : List[str] = get_balanced_memory(
                A_ , low_zero=(device_map == "balanced_low_0") , max_memory=A_ , **A_ , )
        a : List[str] = max_memory
        a : Dict = infer_auto_device_map(A_ , **A_ )
    if isinstance(A_ , A_ ):
        # check if don't have any quantized module on the cpu
        a : Any = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        a : List[str] = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        "\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
                else:
                    logger.info(
                        "Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
        del device_map_without_some_modules
    return device_map
def lowercase(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None) -> Any:
    """Replace ``nn.Linear`` layers in *model* with bitsandbytes quantized layers.

    The original declared every parameter as ``A_`` (a duplicate-argument
    SyntaxError) and discarded the worker's results into the local ``a``
    while later lines read ``model``/``has_been_replaced``; the names used by
    the body are restored here.

    Args:
        model: The model whose linear layers are converted in place.
        bnb_quantization_config: Quantization settings (8-bit vs 4-bit, ...).
        modules_to_not_convert: Module names to leave untouched.
        current_key_name: Recursion accumulator; callers pass None.

    Returns:
        The (mutated) model; warns if nothing was replaced.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug." )
    return model
def lowercase(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    """Recursive worker: swap ``nn.Linear`` children for bnb quantized layers.

    The original declared duplicate ``A_`` parameters (SyntaxError) and
    collapsed locals into ``a`` while later lines read
    ``has_been_replaced``/``current_key_name_str``/``bnb_module``; the names
    the body reads are restored here.

    Args:
        model: Module whose direct children are inspected (recurses).
        bnb_quantization_config: Quantization settings.
        modules_to_not_convert: Names (or dotted-path substrings) to skip.
        current_key_name: Accumulated dotted path during recursion.

    Returns:
        Tuple of (model, has_been_replaced).
    """
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                # NOTE(review): both branches test the same mangled flag
                # `load_in_abit`; originally these were the distinct 8-bit /
                # 4-bit flags — confirm before relying on the elif branch.
                if bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.LinearabitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fpaa_weights=False, threshold=bnb_quantization_config.llm_inta_threshold, )
                elif bnb_quantization_config.load_in_abit:
                    bnb_module = bnb.nn.Linearabit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_abit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant, quant_type=bnb_quantization_config.bnb_abit_quant_type, )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                # Copy the original weights/bias into the quantized module and freeze it.
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def lowercase ( A_ )-> Union[str, Any]:
    """Heuristically list module names to keep un-quantized (e.g. a tied lm_head).

    Operates on a deepcopy made under ``init_empty_weights`` so no real
    tensors are allocated; returns the names of the last child module plus
    anything tied to it, with ``.weight``/``.bias`` suffixes stripped.

    NOTE(review): locals were collapsed into ``a`` so later reads of
    ``tied_model``/``tied_params``/``list_modules`` etc. are unbound, and the
    ``isinstance(A_ , A_ )`` below compares the argument with itself —
    originally this distinguished the dict vs list return shape of
    ``find_tied_parameters`` across accelerate versions. Code left
    byte-identical; restore names before running.
    """
    with init_empty_weights():
        a : Dict = deepcopy(A_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    a : Any = find_tied_parameters(A_ )
    # For compatibility with Accelerate < 0.18
    if isinstance(A_ , A_ ):
        a : List[Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        a : Union[str, Any] = sum(A_ , [] )
    a : List[Any] = len(A_ ) > 0
    # Check if it is a base model
    a : Optional[int] = False
    if hasattr(A_ , "base_model_prefix" ):
        a : int = not hasattr(A_ , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    a : Optional[Any] = list(model.named_children() )
    a : str = [list_modules[-1][0]]
    # add last module together with tied weights
    a : Tuple = set(A_ ) - set(A_ )
    a : Optional[int] = list(set(A_ ) ) + list(A_ )
    # remove ".weight" from the keys
    a : Tuple = [".weight", ".bias"]
    a : Optional[Any] = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                a : str = name.replace(A_ , "" )
        filtered_module_names.append(A_ )
    return filtered_module_names
def lowercase(model) -> bool:
    """Return True iff *model* contains at least one bnb quantized linear layer.

    Two defects fixed from the mangled original: the parameter was named
    ``A_`` while the loop read the undefined name ``model`` (NameError), and
    the ``isinstance`` check tested the argument instead of the loop variable
    ``m``, so submodules were never actually inspected.

    Args:
        model: A ``torch.nn.Module`` to scan recursively via ``.modules()``.

    Returns:
        True if any submodule is a ``bnb.nn.Linearabit``, else False.
    """
    for m in model.modules():
        # Inspect each submodule, not the root model.
        if isinstance(m, bnb.nn.Linearabit):
            return True
    return False
def lowercase(parameter) -> Any:
    """Return the device of the first parameter of *parameter* (an ``nn.Module``).

    The mangled original named the argument ``A_`` while the body read
    ``parameter`` (NameError); the parameter is renamed to match the body.

    Raises:
        StopIteration: if the module has no parameters.
    """
    return next(parameter.parameters()).device
def lowercase ( A_ , A_ , A_ , A_ , A_ , A_ , A_ )-> str:
    """Offload one quantized parameter (and its SCB statistics) to disk, then
    replace it with a meta tensor.

    Reconstructed contract (inferred from the body — TODO confirm): the
    parameters were originally (model, param, param_name, new_dtype,
    offload_folder, offload_index, fpaa_statistics).

    NOTE(review): all seven parameters share the mangled name ``A_`` (a
    duplicate-argument SyntaxError) and locals were collapsed into ``a``, so
    reads of ``fpaa_statistics``/``param_name``/``module``/``splits`` below
    are unbound, and the positional ``A_`` arguments to ``offload_weight``
    cannot be disambiguated with confidence. Code left byte-identical.
    """
    if fpaa_statistics is None:
        set_module_tensor_to_device(A_ , A_ , 0 , dtype=A_ , value=A_ )
    a : Optional[int] = param_name
    a : Union[str, Any] = model
    if "." in tensor_name:
        # Walk the dotted path to reach the module that owns the tensor.
        a : Optional[Any] = tensor_name.split("." )
        for split in splits[:-1]:
            a : str = getattr(A_ , A_ )
            if new_module is None:
                raise ValueError(F'''{module} has no attribute {split}.''' )
            a : int = new_module
        a : Tuple = splits[-1]
    # offload weights
    a : List[Any] = False
    offload_weight(module._parameters[tensor_name] , A_ , A_ , index=A_ )
    if hasattr(module._parameters[tensor_name] , "SCB" ):
        # 8-bit tensors carry their scale (SCB) alongside the weight; offload it too.
        offload_weight(
            module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , A_ , index=A_ , )
    else:
        offload_weight(A_ , A_ , A_ , index=A_ )
        offload_weight(A_ , param_name.replace("weight" , "SCB" ) , A_ , index=A_ )
    # Free the on-device storage by swapping in an empty meta tensor.
    set_module_tensor_to_device(A_ , A_ , "meta" , dtype=A_ , value=torch.empty(*param.size() ) )
| 40
|
"""simple docstring"""
def lowercase(string: str) -> bool:
    """Return True if *string* is an isogram (no repeated letters, case-insensitive).

    The mangled original named the parameter ``A_`` while the body read
    ``string`` (NameError); the parameter is renamed to match the body.

    Args:
        string: Text consisting only of alphabetic characters.

    Returns:
        True when every letter occurs at most once (ignoring case).

    Raises:
        ValueError: if the string contains any non-alphabetic character.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    lowered = string.lower()
    # An isogram has as many letters as distinct letters.
    return len(lowered) == len(set(lowered))
if __name__ == "__main__":
    # Demo entry point: read a string and report whether it is an isogram.
    # The mangled version discarded both results into `__lowercase`, read the
    # undefined names `input_str`/`isogram` in the print, and called the
    # undefined `is_isogram` instead of the `lowercase` function defined above.
    input_str = input("""Enter a string """).strip()
    isogram = lowercase(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 40
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def lowercase(height, width, scale_factor=8) -> Tuple[int, int]:
    """Round a (height, width) pair up to the next multiple of ``scale_factor**2``,
    then divide by ``scale_factor`` to get latent-space dimensions.

    The mangled original declared all three parameters as ``A_`` (a
    duplicate-argument SyntaxError) and discarded the quotients into ``a``
    while later lines read ``new_height``/``new_width``; the names the body
    uses are restored here.

    Args:
        height: Pixel height requested by the caller.
        width: Pixel width requested by the caller.
        scale_factor: VAE downscale factor (default 8).

    Returns:
        Tuple ``(new_height, new_width)`` — each input rounded up to a
        multiple of ``scale_factor**2`` and divided by ``scale_factor``.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up when the dimension is not an exact multiple.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class _A ( _a ):
    """Kandinsky 2.2 decoder pipeline: turns CLIP image embeddings into images.

    Wraps a UNet (``unet``), a DDPM scheduler (``scheduler``) and a VQ
    image decoder (``movq``).

    NOTE(review): the mangling collapsed locals into ``a`` and most call
    arguments into ``__UpperCAmelCase`` (undefined at class scope, e.g. in the
    ``@replace_example_docstring`` decorator below), so reads such as
    ``latents``/``device``/``image`` are unbound — restore before running.
    """
    def __init__( self : Tuple , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : VQModel , ):
        # NOTE(review): the three parameters share one mangled name
        # (duplicate-argument SyntaxError); originally (unet, scheduler, movq).
        super().__init__()
        self.register_modules(
            unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , )
        # Spatial downscale factor implied by the movq encoder depth.
        a : int = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : List[str]):
        """Create (or validate) initial latents and scale them by the scheduler's init sigma."""
        if latents is None:
            a : Any = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase)
        else:
            if latents.shape != shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''')
            a : Dict = latents.to(__UpperCAmelCase)
        a : Dict = latents * scheduler.init_noise_sigma
        return latents
    def __snake_case ( self : Any , __UpperCAmelCase : List[Any]=0):
        """Offload unet and movq to CPU, moving them to the given GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        a : Dict = torch.device(f'''cuda:{gpu_id}''')
        a : Any = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__UpperCAmelCase , __UpperCAmelCase)
    def __snake_case ( self : List[Any] , __UpperCAmelCase : Union[str, Any]=0):
        """Model-level CPU offload with hooks (needs accelerate >= 0.17)."""
        if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        a : List[Any] = torch.device(f'''cuda:{gpu_id}''')
        if self.device.type != "cpu":
            self.to("cpu" , silence_dtype_warnings=__UpperCAmelCase)
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        a : Any = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            a , a : Tuple = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase)
        # We'll offload the last model manually.
        a : Dict = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def __snake_case ( self : Optional[int]):
        """Device the unet actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet , "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(__UpperCAmelCase , "_hf_hook")
                and hasattr(module._hf_hook , "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(__UpperCAmelCase)
    def __call__( self : Optional[Any] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 100 , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ):
        """Run the diffusion loop from image embeddings to decoded images.

        NOTE(review): parameters were originally (image_embeds,
        negative_image_embeds, height, width, num_inference_steps,
        guidance_scale, num_images_per_prompt, generator, latents,
        output_type, return_dict) — all mangled to one name.
        """
        a : Optional[int] = self._execution_device
        # Classifier-free guidance is active when guidance_scale > 1.
        a : int = guidance_scale > 1.0
        if isinstance(__UpperCAmelCase , __UpperCAmelCase):
            a : Union[str, Any] = torch.cat(__UpperCAmelCase , dim=0)
        a : int = image_embeds.shape[0] * num_images_per_prompt
        if isinstance(__UpperCAmelCase , __UpperCAmelCase):
            a : List[str] = torch.cat(__UpperCAmelCase , dim=0)
        if do_classifier_free_guidance:
            # Duplicate embeddings per requested image, then stack [negative, positive].
            a : Union[str, Any] = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0)
            a : Tuple = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0)
            a : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(dtype=self.unet.dtype , device=__UpperCAmelCase)
        self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase)
        a : Union[str, Any] = self.scheduler.timesteps
        a : Any = self.unet.config.in_channels
        a , a : str = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor)
        # create initial latent
        a : Any = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , )
        for i, t in enumerate(self.progress_bar(__UpperCAmelCase)):
            # expand the latents if we are doing classifier free guidance
            a : str = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            a : Dict = {"image_embeds": image_embeds}
            a : Tuple = self.unet(
                sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0]
            if do_classifier_free_guidance:
                # The unet predicts noise and (optionally) a learned variance; split,
                # apply guidance to the noise halves, and re-attach the variance.
                a , a : int = noise_pred.split(latents.shape[1] , dim=1)
                a , a : Any = noise_pred.chunk(2)
                a , a : int = variance_pred.chunk(2)
                a : Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                a : str = torch.cat([noise_pred, variance_pred_text] , dim=1)
            if not (
                hasattr(self.scheduler.config , "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                a , a : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            a : Any = self.scheduler.step(
                __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0]
        # post-processing
        a : Tuple = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''')
        if output_type in ["np", "pil"]:
            # Map [-1, 1] model output to [0, 1] before conversion.
            a : Optional[Any] = image * 0.5 + 0.5
            a : Union[str, Any] = image.clamp(0 , 1)
            a : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            a : Optional[int] = self.numpy_to_pil(__UpperCAmelCase)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__UpperCAmelCase)
| 40
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class _A ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet dataset builder.

    The mangled original declared all three fields under the single name
    ``UpperCAmelCase`` so the later declarations shadowed the earlier ones;
    the field names are restored to match the ``self.config.batch_size``,
    ``self.config.columns`` and ``self.config.features`` reads in the
    builder class below.
    """
    # Number of rows per Arrow record batch read from each parquet file.
    batch_size: int = 1_0_0_0_0
    # Optional subset of columns to load; None means all columns.
    columns: Optional[List[str]] = None
    # Optional explicit feature schema; inferred from parquet metadata when None.
    features: Optional[datasets.Features] = None
class _A ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that streams parquet files in record batches.

    NOTE(review): method names were mangled to ``__snake_case`` (originally
    ``_info``/``_split_generators``/``_cast_table``/``_generate_tables``),
    locals collapsed into ``a`` and call arguments into ``__UpperCAmelCase``;
    later reads such as ``data_files``/``splits``/``parquet_file`` are
    therefore unbound until names are restored.
    """
    # Originally BUILDER_CONFIG_CLASS — TODO confirm.
    UpperCAmelCase : str = ParquetConfig
    def __snake_case ( self : Tuple):
        """Dataset info built from the configured (possibly None) features."""
        return datasets.DatasetInfo(features=self.config.features)
    def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
        """Download/extract data files and produce one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        a : str = dl_manager.download_and_extract(self.config.data_files)
        # A bare str/list/tuple means a single unnamed TRAIN split.
        if isinstance(__UpperCAmelCase , (str, list, tuple)):
            a : Dict = data_files
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        a : Dict = []
        for split_name, files in data_files.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(__UpperCAmelCase):
                    with open(__UpperCAmelCase , "rb") as f:
                        a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
                    break
            splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
        return splits
    def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
        """Cast an Arrow table to the declared feature schema when one exists."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
        return pa_table
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
        """Yield (key, table) pairs by streaming record batches from each parquet file."""
        a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
        # A configured column subset must match the declared features exactly.
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
            with open(__UpperCAmelCase , "rb") as f:
                a : Tuple = pq.ParquetFile(__UpperCAmelCase)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        a : Optional[Any] = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
                    raise
| 40
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowercase = logging.get_logger(__name__)
class _A ( _a ):
    """simple docstring"""

    # NOTE(review): identifiers in this class look machine-mangled — locals
    # are all bound to `a` and parameters to `__UpperCAmelCase`, while later
    # lines read names such as `required_input`, `padding_strategy` and
    # `outputs` that those assignments presumably carried. Comments describe
    # apparent intent only; confirm against the upstream sequence feature
    # extractor before relying on them.

    def __init__( self : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : float , **__UpperCAmelCase : Optional[int]):
        # Store core feature settings: feature size, sampling rate and the
        # scalar value used to pad shorter sequences.
        a : Optional[Any] = feature_size
        a : Tuple = sampling_rate
        a : Any = padding_value
        # Padding side defaults to "right"; attention-mask return is optional.
        a : Union[str, Any] = kwargs.pop("padding_side" , "right")
        a : List[str] = kwargs.pop("return_attention_mask" , __UpperCAmelCase)
        super().__init__(**__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Union[
        BatchFeature,
        List[BatchFeature],
        Dict[str, BatchFeature],
        Dict[str, List[BatchFeature]],
        List[Dict[str, BatchFeature]],
    ] , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = True , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , ):
        # Pad (and optionally truncate) a batch of features to a common
        # length, returning a BatchFeature in the requested tensor backend.
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(__UpperCAmelCase , (list, tuple)) and isinstance(processed_features[0] , (dict, BatchFeature)):
            a : int = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f''' to this method that includes {self.model_input_names[0]}, but you provided'''
                f''' {list(processed_features.keys())}''')
        a : Any = processed_features[self.model_input_names[0]]
        a : Any = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        # Empty batch: nothing to pad; optionally attach an empty mask.
        if len(__UpperCAmelCase) == 0:
            if return_attention_mask:
                a : List[Any] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        a : Tuple = required_input[0]
        if isinstance(__UpperCAmelCase , (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            a : Union[str, Any] = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(__UpperCAmelCase):
                a : Optional[Any] = required_input[index][0]
        # Infer the tensor backend from the first element when not given.
        if return_tensors is None:
            if is_tf_tensor(__UpperCAmelCase):
                a : str = "tf"
            elif is_torch_tensor(__UpperCAmelCase):
                a : Optional[Any] = "pt"
            elif isinstance(__UpperCAmelCase , (int, float, list, tuple, np.ndarray)):
                a : List[str] = "np"
            else:
                raise ValueError(
                    f'''type of {first_element} unknown: {type(__UpperCAmelCase)}. '''
                    "Should be one of a python, numpy, pytorch or tensorflow object.")
        # Normalise every entry to numpy so padding below is uniform.
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float)):
                a : Any = to_numpy(__UpperCAmelCase)
            else:
                a : int = [to_numpy(__UpperCAmelCase) for v in value]
        # Convert padding_strategy in PaddingStrategy
        a : Any = self._get_padding_strategies(padding=__UpperCAmelCase , max_length=__UpperCAmelCase)
        a : int = processed_features[self.model_input_names[0]]
        a : Union[str, Any] = len(__UpperCAmelCase)
        if not all(len(__UpperCAmelCase) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")
        a : Optional[int] = []
        for i in range(__UpperCAmelCase):
            a : Tuple = {k: v[i] for k, v in processed_features.items()}
            # truncation
            a : List[str] = self._truncate(
                __UpperCAmelCase , max_length=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , truncation=__UpperCAmelCase , )
            truncated_inputs.append(__UpperCAmelCase)
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            a : Any = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            a : List[Any] = PaddingStrategy.MAX_LENGTH
        a : Optional[Any] = {}
        for i in range(__UpperCAmelCase):
            # padding
            a : Optional[Any] = self._pad(
                truncated_inputs[i] , max_length=__UpperCAmelCase , padding_strategy=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    a : int = []
                # NOTE(review): `np.floataa` is not a real NumPy dtype name —
                # presumably float64 cast down to float32 upstream; confirm.
                if value.dtype is np.dtype(np.floataa):
                    a : Any = value.astype(np.floataa)
                batch_outputs[key].append(__UpperCAmelCase)
        return BatchFeature(__UpperCAmelCase , tensor_type=__UpperCAmelCase)

    def __snake_case ( self : List[str] , __UpperCAmelCase : Union[Dict[str, np.ndarray], BatchFeature] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , ):
        # Pad a single example (dict of arrays) up to `max_length`, honouring
        # self.padding_side and, optionally, an attention mask.
        a : Tuple = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            a : List[Any] = len(__UpperCAmelCase)
        # Round the target length up to the requested multiple.
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            a : Optional[int] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        a : Union[str, Any] = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__UpperCAmelCase) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            # All-ones mask over the unpadded length.
            # NOTE(review): `np.intaa` is not a real NumPy dtype name — mangled.
            a : str = np.ones(len(__UpperCAmelCase) , dtype=np.intaa)
        if needs_to_be_padded:
            a : Dict = max_length - len(__UpperCAmelCase)
            if self.padding_side == "right":
                if return_attention_mask:
                    a : str = np.pad(
                        processed_features["attention_mask"] , (0, difference))
                # 2-D features pad only along the time axis.
                a : List[str] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                a : str = np.pad(
                    __UpperCAmelCase , __UpperCAmelCase , "constant" , constant_values=self.padding_value)
            elif self.padding_side == "left":
                if return_attention_mask:
                    a : List[str] = np.pad(
                        processed_features["attention_mask"] , (difference, 0))
                a : Tuple = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                a : int = np.pad(
                    __UpperCAmelCase , __UpperCAmelCase , "constant" , constant_values=self.padding_value)
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return processed_features

    def __snake_case ( self : List[str] , __UpperCAmelCase : Union[Dict[str, np.ndarray], BatchFeature] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , ):
        # Truncate a single example to `max_length` (and its attention mask).
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
        a : str = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            a : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        a : Tuple = len(__UpperCAmelCase) > max_length
        if needs_to_be_truncated:
            a : int = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                a : List[str] = processed_features["attention_mask"][:max_length]
        return processed_features

    def __snake_case ( self : Dict , __UpperCAmelCase : Any=False , __UpperCAmelCase : Optional[int]=None):
        # Resolve the user-supplied `padding` argument (bool / str / enum)
        # into a PaddingStrategy and validate the combination.
        # Get padding strategy
        if padding is not False:
            if padding is True:
                a : List[str] = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : Tuple = PaddingStrategy(__UpperCAmelCase)
            elif isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : str = padding
        else:
            a : List[str] = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''')
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.")
        return padding_strategy
| 40
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)

# NOTE(review): the logger above and this URL map are both bound to the same
# mangled name `__lowercase`, so the second assignment shadows the first.
# Map of DPR checkpoint names to their hosted configuration files.
__lowercase = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}
class _A ( _a ):
    """Configuration for DPR encoder/reader models (BERT-style hyper-parameters).

    Fixes over the previous revision:

    * ``__init__`` declared every parameter with the same mangled name
      (``__UpperCAmelCase``), which is a ``SyntaxError`` in Python. The
      parameters are restored to distinct names, reconstructed from the
      public ``DPRConfig`` API in the order of the original defaults
      (30522, 768, 12, 12, 3072, "gelu", ...).
    * Every argument was bound to a throw-away local ``a`` instead of an
      instance attribute, so the configuration kept none of its values.
      Each argument is now stored on ``self``, preserving the original
      assignment order.
    """

    UpperCAmelCase : int = """dpr"""

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        # The pad token id is consumed by the base PretrainedConfig.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection on top of pooled output.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 40
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
__lowercase = logging.get_logger(__name__)
class _A ( _a ):
    """simple docstring"""

    # Deprecated alias: forwards construction to the image-processor base
    # class after emitting a deprecation warning.
    def __init__( self : List[str] , *__UpperCAmelCase : Tuple , **__UpperCAmelCase : List[Any]):
        # NOTE(review): the warning-category argument below is the mangled
        # parameter name `__UpperCAmelCase` — presumably FutureWarning
        # upstream; confirm.
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , __UpperCAmelCase , )
        super().__init__(*__UpperCAmelCase , **__UpperCAmelCase)
| 40
|
"""simple docstring"""
class _A :
    """simple docstring"""

    # NOTE(review): a Fenwick-style structure for range-maximum queries.
    # Assignment targets are machine-mangled to a throw-away local `a`
    # (presumably `self.arr[...]`/`self.tree[...]`/`result` upstream) and
    # all three methods share the name `__snake_case`, so only the last
    # definition of each survives at runtime. Comments describe apparent
    # intent only — confirm against the upstream implementation.

    def __init__( self : int , __UpperCAmelCase : int):
        # Backing storage: raw values plus the tree of interval maxima.
        a : Tuple = size
        a : Dict = [0] * size
        a : Optional[int] = [0] * size

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # Next index whose tree interval covers `index` (get_next).
        return index | (index + 1)

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # Left boundary (exclusive) of the interval ending at `index` (get_prev).
        return (index & (index + 1)) - 1

    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Point update: set a value and propagate maxima up the tree.
        a : Union[str, Any] = value
        while index < self.size:
            a : Dict = self.get_prev(__UpperCAmelCase) + 1
            if current_left_border == index:
                # Interval of length 1: the tree cell is the value itself.
                a : Optional[int] = value
            else:
                a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.get_next(__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # Range-maximum query over [left, right).
        right -= 1 # Because of right is exclusive
        a : List[str] = 0
        while left <= right:
            a : Dict = self.get_prev(__UpperCAmelCase)
            if left <= current_left:
                # Whole tree interval fits in the query range — take it at once.
                a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
                a : Optional[Any] = current_left
            else:
                # Fall back to the single raw element.
                a : List[str] = max(__UpperCAmelCase , self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _A ( _a ):
    """simple docstring"""

    # NOTE(review): this tester looks machine-mangled — every local is bound
    # to `a` and every parameter to `__UpperCAmelCase`, while the bodies read
    # names (`config`, `result`, `input_ids`, ...) those bindings presumably
    # carried upstream. Comments describe apparent intent only.

    def __init__( self : Union[str, Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Any=13 , __UpperCAmelCase : Optional[int]=7 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Dict=True , __UpperCAmelCase : str=True , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Union[str, Any]=99 , __UpperCAmelCase : Any=32 , __UpperCAmelCase : Optional[Any]=5 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : str=37 , __UpperCAmelCase : str="gelu" , __UpperCAmelCase : List[str]=0.1 , __UpperCAmelCase : int=0.1 , __UpperCAmelCase : str=512 , __UpperCAmelCase : int=16 , __UpperCAmelCase : Dict=2 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : str=False , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[int]="None" , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Dict=4 , __UpperCAmelCase : List[Any]=None , ):
        # Cache the hyper-parameters used to build tiny Deberta configs/inputs.
        a : Tuple = parent
        a : Dict = batch_size
        a : List[Any] = seq_length
        a : Optional[Any] = is_training
        a : Optional[Any] = use_input_mask
        a : List[Any] = use_token_type_ids
        a : Any = use_labels
        a : Optional[int] = vocab_size
        a : str = hidden_size
        a : Any = num_hidden_layers
        a : Union[str, Any] = num_attention_heads
        a : Optional[Any] = intermediate_size
        a : str = hidden_act
        a : Optional[int] = hidden_dropout_prob
        a : Any = attention_probs_dropout_prob
        a : List[Any] = max_position_embeddings
        a : str = type_vocab_size
        a : List[Any] = type_sequence_label_size
        a : List[Any] = initializer_range
        a : Any = num_labels
        a : Dict = num_choices
        a : Any = relative_attention
        a : Union[str, Any] = position_biased_input
        a : Tuple = pos_att_type
        a : List[Any] = scope

    def __snake_case ( self : int):
        # Build random input ids plus optional mask/token-type ids and labels.
        a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        a : Union[str, Any] = None
        if self.use_input_mask:
            a : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        a : Dict = None
        if self.use_token_type_ids:
            a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        a : Optional[Any] = None
        a : Dict = None
        a : List[str] = None
        if self.use_labels:
            a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            a : Tuple = ids_tensor([self.batch_size] , self.num_choices)
        a : Tuple = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __snake_case ( self : Tuple):
        # Assemble a small DebertaConfig from the cached hyper-parameters.
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def __snake_case ( self : Tuple):
        # Variant config builder; the `300` presumably shrinks one dimension
        # (e.g. vocab size) for pipeline tests — confirm upstream.
        a : Any = self.get_config()
        a : int = 300
        return config

    def __snake_case ( self : Dict , __UpperCAmelCase : List[str]):
        # A loss must be a scalar (empty shape).
        self.parent.assertListEqual(list(result.loss.size()) , [])

    def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[int]):
        # Run the bare model with decreasing sets of optional inputs and
        # check the hidden-state shape.
        a : Dict = DebertaModel(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)[0]
        a : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase)[0]
        a : Any = model(__UpperCAmelCase)[0]
        self.parent.assertListEqual(list(sequence_output.size()) , [self.batch_size, self.seq_length, self.hidden_size])

    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Any , __UpperCAmelCase : int):
        # Masked-LM head: logits over the vocabulary per position.
        a : Any = DebertaForMaskedLM(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any]):
        # Sequence-classification head: one logit vector per example.
        a : List[str] = self.num_labels
        a : Union[str, Any] = DebertaForSequenceClassification(__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertListEqual(list(result.logits.size()) , [self.batch_size, self.num_labels])
        self.check_loss_output(__UpperCAmelCase)

    def __snake_case ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : str):
        # Token-classification head: logits per token.
        a : Optional[Any] = self.num_labels
        a : Tuple = DebertaForTokenClassification(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def __snake_case ( self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]):
        # QA head: start/end logits per token.
        a : Optional[int] = DebertaForQuestionAnswering(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Optional[int] = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def __snake_case ( self : int):
        # Adapt the prepared inputs to the common-test dict format.
        a : Union[str, Any] = self.prepare_config_and_inputs()
        # NOTE(review): the parenthesised target below unpacks the seven
        # prepared values; in this mangled form every element is the same
        # name `a` and an annotated tuple target is not valid Python.
        (
            (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) , (
                a
            ) ,
        ) : Tuple = config_and_inputs
        a : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _A ( _a ,_a ,unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): all five class attributes below share the mangled name
    # `UpperCAmelCase`, so each assignment overwrites the previous one and
    # only the final `False` survives at runtime. Upstream these are distinct
    # attributes (all_model_classes, pipeline mapping, capability flags) —
    # confirm before relying on them.
    UpperCAmelCase : List[str] = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    UpperCAmelCase : Optional[int] = (
        {
            """feature-extraction""": DebertaModel,
            """fill-mask""": DebertaForMaskedLM,
            """question-answering""": DebertaForQuestionAnswering,
            """text-classification""": DebertaForSequenceClassification,
            """token-classification""": DebertaForTokenClassification,
            """zero-shot""": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCAmelCase : List[Any] = True
    UpperCAmelCase : str = False
    UpperCAmelCase : Optional[Any] = False
    UpperCAmelCase : int = False
    UpperCAmelCase : str = False

    def __snake_case ( self : List[str]):
        # Build the shared model tester plus a ConfigTester for common checks.
        # NOTE(review): `config_class=__UpperCAmelCase` is a mangled reference
        # (presumably DebertaConfig upstream).
        a : int = DebertaModelTester(self)
        a : Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)

    def __snake_case ( self : Tuple):
        self.config_tester.run_common_tests()

    def __snake_case ( self : List[Any]):
        a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any]):
        a : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCAmelCase)

    def __snake_case ( self : List[Any]):
        a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCAmelCase)

    def __snake_case ( self : int):
        a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCAmelCase)

    def __snake_case ( self : str):
        a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCAmelCase)

    @slow
    def __snake_case ( self : str):
        # Smoke-test loading the first published checkpoint.
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : str = DebertaModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): both methods share the mangled name `__snake_case`, so the
    # second definition shadows the first at class-creation time; neither
    # starts with `test_`, so unittest discovery would skip them as written.
    @unittest.skip(reason="Model not available yet")
    def __snake_case ( self : int):
        pass

    @slow
    def __snake_case ( self : int):
        # Integration check: run microsoft/deberta-base on a fixed input and
        # compare a 3x3 slice of the hidden states against recorded values.
        a : Dict = DebertaModel.from_pretrained("microsoft/deberta-base")
        a : Optional[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        a : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            a : int = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)[0]
        # compare the actual values for a slice.
        a : Optional[int] = torch.tensor(
            [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1e-4) , f'''{output[:, 1:4, 1:4]}''')
| 40
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
    """simple docstring"""

    # NOTE(review): mangled test file — every fixture value is bound to the
    # same local `a`, `k.knapsack` is called with one mangled argument
    # repeated four times, and the methods are named `__snake_case` rather
    # than `test_*`, so unittest discovery would not collect them. The
    # expected values (0, 5, 220) suggest capacity/value/weight fixtures
    # upstream — confirm there.
    def __snake_case ( self : List[Any]):
        # Degenerate cases: zero capacity and a single zero item -> value 0.
        a : str = 0
        a : Optional[int] = [0]
        a : Union[str, Any] = [0]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
        a : List[str] = [60]
        a : str = [10]
        a : Optional[int] = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)

    def __snake_case ( self : Optional[int]):
        # Small instance: expected optimal value 5.
        a : Any = 3
        a : str = [1, 2, 3]
        a : Tuple = [3, 2, 1]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5)

    def __snake_case ( self : Tuple):
        # Classic capacity-50 instance: expected optimal value 220.
        a : int = 50
        a : List[Any] = [60, 100, 120]
        a : Optional[int] = [10, 20, 30]
        a : str = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220)


if __name__ == "__main__":
    unittest.main()
| 40
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowercase ( A_ )-> YolosConfig:
    """Build a ``YolosConfig`` for the given checkpoint name.

    Fixes over the previous revision: the checkpoint name was read from an
    undefined ``yolos_name`` and every derived value was bound to a
    throw-away local ``a`` instead of an attribute of the config, so the
    function always returned a default config. Attribute names are
    reconstructed from the public ``YolosConfig`` API — confirm against the
    upstream converter.

    Args:
        A_: checkpoint name, e.g. ``"yolos_ti"`` or ``"yolos_s_200_pre"``.

    Returns:
        A ``YolosConfig`` with architecture sizes and COCO label maps set.
    """
    yolos_name = A_
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]
    # 91 COCO detection classes; label names come from the shared dataset repo.
    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset") , "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def lowercase ( state_dict , config , base_model=False )-> None:
    """Split each fused timm ``qkv`` projection into separate HF q/k/v entries.

    Fixes over the previous revision: all three parameters were declared with
    the same mangled name ``A_`` (a ``SyntaxError`` in Python) and every
    slice was bound to a throw-away local ``a``, so nothing was written back
    into the state dict. Target key names are reconstructed from the HF YOLOS
    layout (``vit.encoder.layer.<i>.attention.attention.{query,key,value}``).
    The dangling ``-> Tuple`` annotation referenced an undefined name and is
    replaced with ``None`` (the function mutates ``state_dict`` in place).

    Args:
        state_dict: mutable mapping of timm parameter names to tensors.
        config: object exposing ``num_hidden_layers`` and ``hidden_size``.
        base_model: kept for signature compatibility; unused, as in the
            original body.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'''vit.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def lowercase ( A_ )-> str:
    """Translate one original/timm YOLOS parameter name into the HF layout.

    Fix over the previous revision: each ``name.replace(...)`` result was
    bound to a throw-away local ``a`` and ``name`` itself was never bound
    from the parameter, so the function referenced an undefined name instead
    of returning the renamed key. The substitutions are now threaded through
    ``name``; the substitution strings and their order are unchanged.
    """
    name = A_
    if "backbone" in name:
        name = name.replace("backbone" , "vit")
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token" , "embeddings.detection_tokens")
    # "mid_pos_embed" must run before the generic "pos_embed" rule.
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer")
    # "attn.proj" must run before the generic "attn" rule.
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn" , "attention.self")
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed" , "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed" , "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm" , "vit.layernorm")
    return name
def lowercase ( A_ , A_ )-> dict:
    '''simple docstring'''
    # NOTE(review): mangled — both parameters are named `A_` (a SyntaxError
    # in Python) and each q/k/v slice is bound to a throw-away local `a`;
    # the body reads `orig_state_dict`/`model`, which those parameters
    # presumably were. Apparent intent: rewrite timm-style fused `qkv`
    # entries into separate query/key/value entries, renaming all other
    # keys via the rename helper — confirm against the upstream converter.
    for key in orig_state_dict.copy().keys():
        a : Optional[int] = orig_state_dict.pop(A_)
        if "qkv" in key:
            # Keys look like "blocks.<layer>.attn.qkv.{weight,bias}".
            a : Optional[Any] = key.split(".")
            a : List[str] = int(key_split[2])
            # Per-layer attention width used to slice the fused matrix.
            a : int = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                a : Union[str, Any] = val[:dim, :]
                a : Union[str, Any] = val[
                    dim : dim * 2, :
                ]
                a : List[str] = val[-dim:, :]
            else:
                a : Optional[Any] = val[:dim]
                a : List[Any] = val[dim : dim * 2]
                a : int = val[-dim:]
        else:
            a : int = val
    return orig_state_dict
def lowercase ( )-> torch.Tensor:
    """Fetch the standard COCO "two cats" test image used to verify conversions.

    Fixes over the previous revision: the URL and the opened image were bound
    to a throw-away local ``a`` (leaving ``url``/``im`` undefined at the
    ``return``), and ``stream=A_`` referenced an undefined module-level name;
    ``stream=True`` is required so ``response.raw`` can be handed to
    ``Image.open``.

    Note: the original ``torch.Tensor`` return annotation is kept for
    interface compatibility, although the function returns a PIL image.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def lowercase ( A_ , A_ , A_ , A_ = False )-> Any:
    '''simple docstring'''
    # NOTE(review): this converter looks machine-mangled: all four parameters
    # share the name `A_` (a SyntaxError in Python), every result is bound to
    # a throw-away local `a`, and the helpers it calls (config builder, state
    # dict converter, image preparation) are themselves all named `lowercase`
    # in this file. Comments below describe apparent intent only.
    a : str = get_yolos_config(A_)
    # load original state_dict
    a : List[str] = torch.load(A_ , map_location="cpu")["model"]
    # load 🤗 model
    a : List[str] = YolosForObjectDetection(A_)
    model.eval()
    a : Tuple = convert_state_dict(A_ , A_)
    model.load_state_dict(A_)
    # Check outputs on an image, prepared by YolosImageProcessor
    a : Dict = 800 if yolos_name != "yolos_ti" else 512
    a : List[str] = YolosImageProcessor(format="coco_detection" , size=A_)
    a : Optional[Any] = image_processor(images=prepare_img() , return_tensors="pt")
    a : str = model(**A_)
    a , a : Tuple = outputs.logits, outputs.pred_boxes
    a , a : Dict = None, None
    # Per-checkpoint reference slices used to validate the conversion.
    if yolos_name == "yolos_ti":
        a : str = torch.tensor(
            [[-3_9.5_0_2_2, -1_1.9_8_2_0, -1_7.6_8_8_8], [-2_9.9_5_7_4, -9.9_7_6_9, -1_7.7_6_9_1], [-4_2.3_2_8_1, -2_0.7_2_0_0, -3_0.6_2_9_4]])
        a : int = torch.tensor(
            [[0.4_0_2_1, 0.0_8_3_6, 0.7_9_7_9], [0.0_1_8_4, 0.2_6_0_9, 0.0_3_6_4], [0.1_7_8_1, 0.2_0_0_4, 0.2_0_9_5]])
    elif yolos_name == "yolos_s_200_pre":
        a : Any = torch.tensor(
            [[-2_4.0_2_4_8, -1_0.3_0_2_4, -1_4.8_2_9_0], [-4_2.0_3_9_2, -1_6.8_2_0_0, -2_7.4_3_3_4], [-2_7.2_7_4_3, -1_1.8_1_5_4, -1_8.7_1_4_8]])
        a : str = torch.tensor(
            [[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]])
    elif yolos_name == "yolos_s_300_pre":
        a : Optional[Any] = torch.tensor(
            [[-3_6.2_2_2_0, -1_4.4_3_8_5, -2_3.5_4_5_7], [-3_5.6_9_7_0, -1_4.7_5_8_3, -2_1.3_9_3_5], [-3_1.5_9_3_9, -1_3.6_0_4_2, -1_6.8_0_4_9]])
        a : Optional[int] = torch.tensor(
            [[0.7_6_1_4, 0.2_3_1_6, 0.4_7_2_8], [0.7_1_6_8, 0.4_4_9_5, 0.3_8_5_5], [0.4_9_9_6, 0.1_4_6_6, 0.9_9_9_6]])
    elif yolos_name == "yolos_s_dWr":
        a : List[str] = torch.tensor(
            [[-4_2.8_6_6_8, -2_4.1_0_4_9, -4_1.1_6_9_0], [-3_4.7_4_5_6, -1_4.1_2_7_4, -2_4.9_1_9_4], [-3_3.7_8_9_8, -1_2.1_9_4_6, -2_5.6_4_9_5]])
        a : List[Any] = torch.tensor(
            [[0.5_5_8_7, 0.2_7_7_3, 0.0_6_0_5], [0.5_0_0_4, 0.3_0_1_4, 0.9_9_9_4], [0.4_9_9_9, 0.1_5_4_8, 0.9_9_9_4]])
    elif yolos_name == "yolos_base":
        a : Any = torch.tensor(
            [[-4_0.6_0_6_4, -2_4.3_0_8_4, -3_2.6_4_4_7], [-5_5.1_9_9_0, -3_0.7_7_1_9, -3_5.5_8_7_7], [-5_1.4_3_1_1, -3_3.3_5_0_7, -3_5.6_4_6_2]])
        a : Union[str, Any] = torch.tensor(
            [[0.5_5_5_5, 0.2_7_9_4, 0.0_6_5_5], [0.9_0_4_9, 0.2_6_6_4, 0.1_8_9_4], [0.9_1_8_3, 0.1_9_8_4, 0.1_6_3_5]])
    else:
        raise ValueError(F'''Unknown yolos_name: {yolos_name}''')
    assert torch.allclose(logits[0, :3, :3] , A_ , atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3] , A_ , atol=1e-4)
    # Persist the converted model + processor.
    Path(A_).mkdir(exist_ok=A_)
    print(F'''Saving model {yolos_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(A_)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(A_)
    if push_to_hub:
        # Map local checkpoint names to hub repository names.
        a : Tuple = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        a : Any = model_mapping[yolos_name]
        image_processor.push_to_hub(A_ , organization="hustvl")
        model.push_to_hub(A_ , organization="hustvl")
if __name__ == "__main__":
    # NOTE(review): mangled — the parser is bound to `__lowercase` but used
    # as `parser`/`args`, and `convert_yolos_checkpoint` does not exist under
    # that name in this file (the converter above is defined as `lowercase`);
    # confirm the upstream names.
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--yolos_name""",
        default="""yolos_s_200_pre""",
        type=str,
        help=(
            """Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"""
            """ 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."""
        ),
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original state dict (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    __lowercase = parser.parse_args()
    convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for LayoutLM's BERT-style WordPiece tokenizer.

    The mixin drives the common tokenizer test-suite through the class
    attributes below; ``setUp`` writes a minimal vocab file it can load.
    """

    # Names required by TokenizerTesterMixin (were all bound to the single
    # name `UpperCAmelCase`, clobbering each other) — restored.
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        # Minimal WordPiece vocab: ids are the list positions (e.g. "un" -> 7).
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Return a slow tokenizer loaded from the temporary vocab dir."""
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Provide a raw/normalized text pair for the common round-trip tests."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_like(self):
        # Not applicable to this tokenizer; intentionally a no-op.
        pass
| 40
| 1
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
# Issue labels that exempt an issue from the stale-bot lifecycle.
# (Was bound to the throwaway `__lowercase`, leaving LABELS_TO_EXEMPT
# undefined for the function below — restored.)
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main() -> None:
    """Close, revive, or flag stale issues on huggingface/diffusers.

    Lifecycle: after 23 days of inactivity on a month-old issue, post a
    stale notice and label it; if nobody but the bot replies for another
    7 days, close it; if a human replies to a stale issue, reopen it.
    Requires a ``GITHUB_TOKEN`` environment variable.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first (the lambda parameter must match the name it
        # uses — it was `lambda A_: i.created_at` before).
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")


if __name__ == "__main__":
    # The guard calls main(); the function was misnamed `lowercase` — restored.
    main()
| 40
|
"""simple docstring"""
def lowercase(num) -> str:
    """Convert an integer to its binary string representation, e.g. 5 -> "0b101".

    Negative numbers are rendered with a leading "-" (sign-magnitude, not
    two's complement). Floats and strings are rejected explicitly.

    Raises:
        TypeError: if ``num`` is a float or a str.
    """
    # The parameter was named `A_` while the body read `num`, and both
    # isinstance checks compared `num` against itself — restored.
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        # Collect bits most-significant-first by inserting at the front.
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class _A :
    """Shape/config fixture that builds EfficientFormer configs and dummy inputs
    for the TF model tests below.

    NOTE(review): as committed this block cannot be parsed — every ``__init__``
    parameter shares the name ``__UpperCAmelCase`` (duplicate argument =>
    SyntaxError) and the method bodies bind results to a throwaway ``a`` while
    reading names that are never assigned (``parent``, ``batch_size``, ...).
    The comments below record the evident intent; the real names must be
    restored from the upstream EfficientFormer test file.
    """

    def __init__( self : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : int = 13 , __UpperCAmelCase : int = 64 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = True , __UpperCAmelCase : int = 128 , __UpperCAmelCase : Dict=[16, 32, 64, 128] , __UpperCAmelCase : int = 7 , __UpperCAmelCase : int = 4 , __UpperCAmelCase : int = 37 , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 10 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : int = 128 , __UpperCAmelCase : List[int] = [2, 2, 2, 2] , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , ):
        # Intended: stash the test-case handle plus every hyperparameter on self.
        a : int = parent
        a : List[str] = batch_size
        a : List[Any] = image_size
        a : Union[str, Any] = patch_size
        a : Any = num_channels
        a : Union[str, Any] = is_training
        a : Union[str, Any] = use_labels
        a : Tuple = hidden_size
        a : Tuple = num_hidden_layers
        a : int = num_attention_heads
        a : Union[str, Any] = intermediate_size
        a : int = hidden_act
        a : Tuple = hidden_dropout_prob
        a : Optional[int] = attention_probs_dropout_prob
        a : List[str] = type_sequence_label_size
        a : str = initializer_range
        a : Optional[int] = encoder_stride
        a : Optional[int] = num_attention_outputs
        a : Any = embed_dim
        # seq_length is derived, not a parameter: one extra position beyond embed_dim.
        a : Dict = embed_dim + 1
        a : Union[str, Any] = resolution
        a : Optional[int] = depths
        a : Union[str, Any] = hidden_sizes
        a : Dict = dim
        a : List[str] = mlp_expansion_ratio

    def __snake_case ( self : Dict):
        # Called as `prepare_config_and_inputs` by the test class below:
        # random pixel_values, optional labels, plus a fresh config.
        a : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a : str = None
        if self.use_labels:
            a : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        a : List[str] = self.get_config()
        return config, pixel_values, labels

    def __snake_case ( self : Optional[int]):
        # Called as `get_config` above: materialize an EfficientFormerConfig
        # from the stored hyperparameters.
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : int , __UpperCAmelCase : Dict):
        # Called as `create_and_check_model`: forward pass shape check.
        a : Optional[Any] = TFEfficientFormerModel(config=__UpperCAmelCase)
        a : int = model(__UpperCAmelCase , training=__UpperCAmelCase)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def __snake_case ( self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple):
        # Called as `create_and_check_for_image_classification`: logits shape
        # check for RGB and single-channel (greyscale) inputs.
        a : Optional[int] = self.type_sequence_label_size
        a : Union[str, Any] = TFEfficientFormerForImageClassification(__UpperCAmelCase)
        a : Dict = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        a : Union[str, Any] = 1
        a : Any = TFEfficientFormerForImageClassification(__UpperCAmelCase)
        a : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        a : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))

    def __snake_case ( self : Tuple):
        # Called as `prepare_config_and_inputs_for_common`: (config, inputs_dict).
        # NOTE(review): `a , a : List[str] = ...` is an annotated tuple target — SyntaxError.
        a : Union[str, Any] = self.prepare_config_and_inputs()
        a , a , a : List[str] = config_and_inputs
        a : int = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _A ( _a ,_a ,unittest.TestCase ):
    """Common + pipeline test-suite bindings for the TF EfficientFormer models.

    NOTE(review): the class-level flags below are all bound to the single name
    ``UpperCAmelCase`` (each rebinding clobbers the previous one; upstream they
    are presumably ``all_model_classes``, ``pipeline_model_mapping`` and the
    ``test_*`` switches — confirm against the upstream file).  ``setUp`` also
    instantiates ``TFEfficientFormerModelTester``, which is not defined in this
    file (the tester class above is named ``_A``), and several bodies use the
    invalid ``a , a : Dict = ...`` tuple-annotation pattern.
    """

    UpperCAmelCase : Dict = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    UpperCAmelCase : Optional[Any] = (
        {
            """feature-extraction""": TFEfficientFormerModel,
            """image-classification""": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    UpperCAmelCase : Tuple = False
    UpperCAmelCase : Optional[Any] = False
    UpperCAmelCase : List[Any] = False
    UpperCAmelCase : Dict = False
    UpperCAmelCase : Union[str, Any] = False

    def __snake_case ( self : List[Any]):
        # setUp: build the shape fixture and a ConfigTester for common config checks.
        a : Any = TFEfficientFormerModelTester(self)
        a : Optional[Any] = ConfigTester(
            self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)

    def __snake_case ( self : str):
        # Run the shared PretrainedConfig sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason="EfficientFormer does not use inputs_embeds")
    def __snake_case ( self : int):
        pass

    @unittest.skip(reason="EfficientFormer does not support input and output embeddings")
    def __snake_case ( self : Optional[Any]):
        pass

    def __snake_case ( self : Optional[Any]):
        # Verify `model.call` exposes `pixel_values` as its first argument.
        a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a : Dict = model_class(__UpperCAmelCase)
            a : Optional[Any] = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a : List[str] = [*signature.parameters.keys()]
            a : List[Any] = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase)

    def __snake_case ( self : str):
        # Check hidden-state count/shapes, both via call kwargs and via config.
        def check_hidden_states_output(__UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any]):
            a : int = model_class(__UpperCAmelCase)
            a : Optional[int] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) , training=__UpperCAmelCase)
            a : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            a : Optional[Any] = getattr(
                self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1)
            self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
            if hasattr(self.model_tester , "encoder_seq_length"):
                a : Optional[Any] = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , "chunk_length") and self.model_tester.chunk_length > 1:
                    a : Any = seq_length * self.model_tester.chunk_length
            else:
                a : str = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                a : Union[str, Any] = outputs.decoder_hidden_states
                # NOTE(review): "asseretIsInstance" is a typo for assertIsInstance —
                # this (dead for EfficientFormer) branch would raise AttributeError.
                self.asseretIsInstance(__UpperCAmelCase , (list, tuple))
                self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
                a : int = getattr(self.model_tester , "seq_length" , __UpperCAmelCase)
                a : Any = getattr(self.model_tester , "decoder_seq_length" , __UpperCAmelCase)
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:]) , [decoder_seq_length, self.model_tester.hidden_size] , )

        a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a : int = True
            check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a : Optional[Any] = True
            check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)

    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any]=False):
        # _prepare_for_class override: the teacher-distillation head computes its
        # own targets, so drop `labels` for that class.
        a : str = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def __snake_case ( self : str):
        a : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase)

    @unittest.skip(reason="EfficientFormer does not implement masked image modeling yet")
    def __snake_case ( self : Union[str, Any]):
        a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCAmelCase)

    def __snake_case ( self : str):
        a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)

    @slow
    def __snake_case ( self : int):
        # Smoke-test from_pretrained on the first published checkpoint.
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Optional[int] = TFEfficientFormerModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any]):
        # Attention-output count/shape checks, via call kwargs and via config.
        a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        a : Dict = True
        a : Dict = getattr(self.model_tester , "seq_length" , __UpperCAmelCase)
        a : List[str] = getattr(self.model_tester , "encoder_seq_length" , __UpperCAmelCase)
        a : List[Any] = getattr(self.model_tester , "key_length" , __UpperCAmelCase)
        a : List[str] = getattr(self.model_tester , "chunk_length" , __UpperCAmelCase)
        if chunk_length is not None and hasattr(self.model_tester , "num_hashes"):
            a : Tuple = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            a : Optional[int] = True
            a : Dict = False
            a : Union[str, Any] = True
            a : Any = model_class(__UpperCAmelCase)
            a : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) , training=__UpperCAmelCase)
            a : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__UpperCAmelCase) , self.model_tester.num_attention_outputs)
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            a : str = True
            a : Dict = model_class(__UpperCAmelCase)
            a : int = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase) , training=__UpperCAmelCase)
            a : Any = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(__UpperCAmelCase) , self.model_tester.num_attention_outputs)
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:]) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )

    def __snake_case ( self : Optional[int]):
        # We use a simplified version of this test for EfficientFormer because it requires training=False
        # and Keras refuses to let us force that during functional construction
        a , a : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            a : Any = model_class(__UpperCAmelCase)
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            a : str = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__UpperCAmelCase)
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            a : Optional[int] = model(__UpperCAmelCase)
            self.assertTrue(outputs_dict is not None)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    Returns a PIL ``Image``. Renamed from the obfuscated `lowercase`: the
    integration tests below call it as ``prepare_img()``; the body also bound
    the opened image to a throwaway ``a`` while returning the undefined name
    ``image`` — both restored.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class _A ( unittest.TestCase ):
    """Slow integration tests: run the published efficientformer-l1-300
    checkpoints on a fixture image and compare logits against reference values.

    NOTE(review): the locals below are bound to a throwaway ``a`` while the
    upstream names (``model``, ``image_processor``, ``image``, ``inputs``,
    ``outputs``, ``expected_shape``, ``expected_slice``) are implied by use —
    restore them before running.
    """

    @cached_property
    def __snake_case ( self : Tuple):
        # Read as `self.default_image_processor` by the tests below — the
        # upstream property name is presumably `default_image_processor`; confirm.
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def __snake_case ( self : Optional[int]):
        # Plain image-classification head: check logits shape and a 3-value slice.
        a : Any = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        a : Optional[int] = self.default_image_processor
        a : int = prepare_img()
        a : Optional[Any] = image_processor(images=__UpperCAmelCase , return_tensors="tf")
        # forward pass
        a : Tuple = model(**__UpperCAmelCase , training=__UpperCAmelCase)
        # verify the logits
        a : str = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
        a : Any = tf.constant([-0.0_555, 0.4_825, -0.0_852])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))

    @slow
    def __snake_case ( self : Optional[Any]):
        # Distillation ("WithTeacher") head: same checks, different reference slice.
        a : List[Any] = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300")
        a : int = self.default_image_processor
        a : int = prepare_img()
        a : str = image_processor(images=__UpperCAmelCase , return_tensors="tf")
        # forward pass
        a : Union[str, Any] = model(**__UpperCAmelCase , training=__UpperCAmelCase)
        # verify the logits
        a : str = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
        a : Any = tf.constant([-0.1_312, 0.4_353, -1.0_499])
        self.assertTrue(np.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))
| 40
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def lowercase(vl, wt, w, n):
    """Solve the fractional knapsack problem greedily.

    Args:
        vl: item values.
        wt: item weights (parallel to ``vl``).
        w:  knapsack capacity.
        n:  number of items.

    Returns:
        The maximum total value attainable, taking fractions of one item if
        needed. Returns 0 for zero capacity; sum of all values if everything fits.
    """
    # All four parameters were named `A_` (a SyntaxError) while the body read
    # vl/wt/w/n; the sort key and its arguments were likewise broken — restored.
    # Sort items by value/weight ratio, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    # Prefix sums of weights locate how many whole items fit.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Walk unvisited edges from ``u`` depth-first, returning the edge path.

    Each undirected edge is marked visited in both directions so it is
    traversed exactly once. (All four functions here were obfuscated to the
    same name `lowercase` with duplicate `A_` parameters; the names the call
    sites actually use — dfs/check_circuit_or_path/check_euler/main — are restored.)
    """
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph by its odd-degree vertex count.

    Returns (1, odd_node) for an Euler circuit (no odd vertices),
    (2, odd_node) for an Euler path (exactly two), (3, odd_node) otherwise;
    odd_node is the last odd-degree vertex seen, or -1.
    """
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    """Report whether ``graph`` has an Euler cycle/path and print one traversal."""
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        # An Euler path must start at one of the two odd-degree vertices.
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    """Exercise check_euler on a few sample graphs."""
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 40
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude, angle, radian_mode=False):
    """Resolve a polar force into Cartesian [x, y] components.

    ``angle`` is in degrees unless ``radian_mode`` is True. (Both functions
    were obfuscated to the duplicate name `lowercase` with `A_` parameters;
    the names the module-level checks below call are restored.)
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces, location, eps=10**-1):
    """Return True if the net moment of ``forces`` applied at ``location`` is ~0.

    Both arrays are (n, 2); the per-force z-moments come from the 2-D cross
    product and are summed, then compared against the tolerance ``eps``.
    """
    moments: NDArray[floataa] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)

# Canonical RoFormer checkpoints -> hosted config.json URLs.
# NOTE(review): this dict rebinds `__lowercase`, clobbering the logger assigned
# on the line above — upstream these are two distinct names (a module logger
# and a *_PRETRAINED_CONFIG_ARCHIVE_MAP constant); restore before use.
__lowercase = {
    """junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
    """junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
    """junnyu/roformer_chinese_char_small""": (
        """https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
    ),
    """junnyu/roformer_chinese_char_base""": (
        """https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
    ),
    """junnyu/roformer_small_discriminator""": (
        """https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
    ),
    """junnyu/roformer_small_generator""": (
        """https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class _A(PretrainedConfig):
    """Configuration for RoFormer models.

    Stores the hyperparameters that define a RoFormer architecture (rotary
    position-embedding transformer). The `__init__` previously declared every
    parameter as the duplicate name `__UpperCAmelCase` (a SyntaxError) while
    the body read the real names — the names are restored from the body; the
    base class `_a` is resolved to the `PretrainedConfig` this file imports.
    """

    # PretrainedConfig registry key — presumably `model_type` upstream.
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        # Falls back to hidden_size when no standalone embedding size is given.
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Whether rotary embeddings are also applied to the value projections.
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class _A(OnnxConfig):
    """ONNX export configuration for RoFormer.

    Declares the dynamic axes of the model inputs for the exporter. The base
    class `_a` is resolved to the `OnnxConfig` this file imports, and the
    property is named `inputs` — the name the OnnxConfig API reads.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` dimension.
        # (A dead unconditional overwrite of dynamic_axis that clobbered the
        # multiple-choice branch has been removed.)
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 40
|
"""simple docstring"""
def lowercase(mass, velocity) -> float:
    """Return the kinetic energy (1/2 * m * v^2) of a body.

    The two parameters were both named `A_` (a duplicate-argument SyntaxError)
    while the body read `mass` — restored as (mass, velocity).

    Raises:
        ValueError: if ``mass`` is negative.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    # abs() makes the result independent of the velocity's sign.
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure: submodule -> public names it provides.  The original
# pattern rebinds `__lowercase` for every list, so nothing ever reaches the
# structure passed to _LazyModule — restored to the standard transformers idiom.
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch backend missing: simply don't expose the PyTorch model.
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
import os
import sys
import unittest
# Repository root (three levels up from this test file); both assignments
# below were bound to `__lowercase`, leaving `git_repo_path` undefined — restored.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402

# Align TRANSFORMERS_PATH in check_dummies with the current path
# (upstream attribute name: PATH_TO_DIFFUSERS — confirm against check_dummies.py)
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, "src", "diffusers")


class _A(unittest.TestCase):
    """Tests for the `check_dummies` repository utility."""

    def test_find_backend(self):
        # Single, double and triple backend guards must map to their key names.
        simple_backend = find_backend(" if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        double_backend = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        triple_backend = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")
        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")
        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A weighted, directed edge in a 0-1 graph.

    (Both classes here were obfuscated to the same name `_A`, so the second
    shadowed the first and the `Edge(...)` constructor call below resolved to
    nothing; the two field declarations were also both named `UpperCAmelCase`.)
    """

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph over vertices ``0..size-1`` whose edges all have weight 0 or 1,
    with a 0-1 BFS shortest-path query."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Iterate over the outgoing edges of ``vertex``."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        """Add a directed edge; weight must be 0 or 1 and the target in range.

        Raises:
            ValueError: on an invalid weight or out-of-range target vertex.
        """
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """Return the shortest distance from start to finish via 0-1 BFS.

        Weight-0 edges go to the front of the deque and weight-1 edges to the
        back, so vertices are settled in nondecreasing distance order.

        Raises:
            ValueError: if ``finish_vertex`` is unreachable.
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                # Skip if the destination already has an equal-or-better distance.
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40
|
"""simple docstring"""
# Dependency pin table: distribution name -> full pip requirement specifier.
# NOTE(review): tables like this are normally autogenerated from setup.py
# (e.g. via a `make deps_table_update` target) — presumably edits belong in
# setup.py, not here; confirm before hand-editing a pin.
__lowercase = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
| 40
| 1
|
"""simple docstring"""
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = """▁"""
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """Test suite for the BigBird tokenizers (slow SentencePiece + fast Rust).

    NOTE(review): this block looks machine-scrambled — every test method below
    is named ``__snake_case``, so each later ``def`` silently shadows the
    previous one and only the last method object survives under that name.
    Many bodies also read names (``tokenizer``, ``vocab_keys``,
    ``__UpperCAmelCase``, ...) that are never bound, and the base class ``_a``
    is presumably ``TokenizerTesterMixin``. Code is reproduced byte-for-byte;
    confirm against the upstream BigBird tokenization test before relying on it.
    """

    # Mixin configuration: tokenizer classes under test and which variants to run.
    UpperCAmelCase : int = BigBirdTokenizer
    UpperCAmelCase : int = BigBirdTokenizerFast
    UpperCAmelCase : List[Any] = True
    UpperCAmelCase : List[str] = True
    # setUp: build a tokenizer from the fixture model and cache it on disk.
    def __snake_case ( self : List[str]):
        super().setUp()
        a : int = self.tokenizer_class(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
        tokenizer.save_pretrained(self.tmpdirname)
    # Round-trips the "<s>" sentinel through token<->id conversion (expected id 1).
    def __snake_case ( self : Tuple):
        a : Optional[int] = "<s>"
        a : Optional[Any] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase)
    # Spot-checks vocab ordering and total size (1004 entries in the fixture model).
    def __snake_case ( self : Tuple):
        a : Tuple = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "<unk>")
        self.assertEqual(vocab_keys[1] , "<s>")
        self.assertEqual(vocab_keys[-1] , "[MASK]")
        self.assertEqual(len(__UpperCAmelCase) , 1004)
    # The reported vocab_size excludes added special tokens (1000 base pieces).
    def __snake_case ( self : Any):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000)
    # Slow and Rust tokenizers must agree on tokenization and encoding.
    def __snake_case ( self : int):
        if not self.test_rust_tokenizer:
            return
        a : Dict = self.get_tokenizer()
        a : int = self.get_rust_tokenizer()
        a : Optional[Any] = "I was born in 92000, and this is falsé."
        a : str = tokenizer.tokenize(__UpperCAmelCase)
        a : Dict = rust_tokenizer.tokenize(__UpperCAmelCase)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
        a : Dict = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
        a : List[str] = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
        a : List[str] = self.get_rust_tokenizer()
        a : Any = tokenizer.encode(__UpperCAmelCase)
        a : List[Any] = rust_tokenizer.encode(__UpperCAmelCase)
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
    # Full tokenization pass: pieces, ids, and ids-back-to-pieces (with <unk> for OOV).
    def __snake_case ( self : List[str]):
        a : Optional[int] = BigBirdTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
        a : int = tokenizer.tokenize("This is a test")
        self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [285, 46, 10, 170, 382] , )
        a : Optional[int] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            __UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        a : Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
        self.assertListEqual(
            __UpperCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        a : Dict = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
        self.assertListEqual(
            __UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )
    # Real pretrained tokenizer, fetched once per test class (network/slow).
    @cached_property
    def __snake_case ( self : Any):
        return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    # Integration: encoding "Hello World!" must match the pinned id sequence.
    @slow
    def __snake_case ( self : Tuple):
        a : Optional[int] = "Hello World!"
        a : int = [65, 18536, 2260, 101, 66]
        self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase))
    # Integration: long text with punctuation and OOV words against pinned ids.
    @slow
    def __snake_case ( self : Union[str, Any]):
        a : Optional[Any] = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        # fmt: off
        a : int = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, 66] # noqa: E231
        # fmt: on
        self.assertListEqual(__UpperCAmelCase , self.big_tokenizer.encode(__UpperCAmelCase))
    # Smoke test: encoded batches must be consumable by a (full-attention) BigBird model.
    @require_torch
    @slow
    def __snake_case ( self : str):
        import torch
        from transformers import BigBirdConfig, BigBirdModel
        # Build sequence
        a : Dict = list(self.big_tokenizer.get_vocab().keys())[:10]
        a : Optional[int] = " ".join(__UpperCAmelCase)
        a : Optional[int] = self.big_tokenizer.encode_plus(__UpperCAmelCase , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase)
        a : Tuple = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__UpperCAmelCase)
        a : Dict = BigBirdConfig(attention_type="original_full")
        a : List[str] = BigBirdModel(__UpperCAmelCase)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**__UpperCAmelCase)
            model(**__UpperCAmelCase)
    # Decoding keeps [MASK]/[SEP]/[CLS] formatting exactly as pinned.
    @slow
    def __snake_case ( self : List[Any]):
        a : Any = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
        a : Optional[int] = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
        self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
    # Full integration check against a pinned revision's expected encoding.
    @slow
    def __snake_case ( self : Optional[int]):
        # fmt: off
        a : Any = {"input_ids": [[65, 39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114, 66], [65, 448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the BERT package: submodule name -> public symbols.
# BUG FIX: the scrambled version bound the dict and every optional-backend list
# to the same throwaway name `__lowercase`, so `_import_structure` — consumed by
# the `_LazyModule(...)` call at the bottom of the file — was never defined.
# Each optional backend is probed up front and its submodule registered only
# when the backend is importable (keys match the `from .<module> import ...`
# statements in the TYPE_CHECKING branch below).
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers and IDEs resolve the real symbols eagerly; at
    # runtime the `else` branch installs a lazy module instead.
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )
    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy module must replace this module in sys.modules;
    # binding it to a throwaway name (`__lowercase`) would leave the package
    # without any of its public attributes at runtime (and the `import sys`
    # above would be dead code).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# BUG FIX: every module constant below was bound to the same scrambled name
# `__lowercase` (each assignment clobbered the previous one), yet the tokenizer
# class reads `logger`, `SPIECE_UNDERLINE`, `VOCAB_FILES_NAMES`,
# `RESOURCE_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `PRETRAINED_INIT_CONFIGURATION`
# — all of which were therefore undefined. Values are unchanged.
logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}

RESOURCE_FILES_NAMES = {
    """sentencepiece_model_file""": """sentencepiece.bpe.model""",
    """vocab_file""": """vocab.txt""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
        """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
    },
    """sentencepiece_model_file""": {
        """ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
        """ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """ernie-m-base""": 514,
    """ernie-m-large""": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    """ernie-m-base""": {"""do_lower_case""": False},
    """ernie-m-large""": {"""do_lower_case""": False},
}
class _A ( _a ):
    """SentencePiece-based ERNIE-M tokenizer (ported from PaddleNLP).

    NOTE(review): this block looks machine-scrambled and is not valid Python as
    written — several ``def`` statements repeat the parameter name
    ``__UpperCAmelCase`` (a SyntaxError), most methods share the name
    ``__snake_case`` (each later ``def`` shadows the previous one), and many
    statements bind a throwaway local ``a`` while subsequent lines read
    differently-named variables (``normalized_text``, ``pieces``, ``queue``-style
    names, ...). The code is reproduced byte-for-byte below with commentary
    only; confirm each method against the upstream ``ErnieMTokenizer`` before
    attempting a functional repair.
    """

    # Tokenizer configuration tables expected by PreTrainedTokenizer.
    UpperCAmelCase : List[str] = ["input_ids"]
    UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
    UpperCAmelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase : int = RESOURCE_FILES_NAMES
    # __init__: loads the SentencePiece checkpoint and (optionally) an external
    # vocab file; falls back to the piece table of the sp model otherwise.
    def __init__( self : int , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int=None , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : Tuple="utf8" , __UpperCAmelCase : List[str]="[UNK]" , __UpperCAmelCase : Dict="[SEP]" , __UpperCAmelCase : Any="[PAD]" , __UpperCAmelCase : str="[CLS]" , __UpperCAmelCase : Optional[Any]="[MASK]" , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : Dict , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        a : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , vocab_file=__UpperCAmelCase , encoding=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
        a : str = do_lower_case
        a : Dict = sentencepiece_model_ckpt
        a : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(__UpperCAmelCase)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            a : Any = self.load_vocab(filepath=__UpperCAmelCase)
        else:
            a : int = {self.sp_model.id_to_piece(__UpperCAmelCase): id for id in range(self.sp_model.get_piece_size())}
        a : int = {v: k for k, v in self.vocab.items()}
    # Presumably get_offset_mapping: map each token back to its (start, end)
    # character span in the normalized input — confirm against upstream.
    def __snake_case ( self : Any , __UpperCAmelCase : List[Any]):
        if text is None:
            return None
        a : Any = self.tokenize(__UpperCAmelCase)
        a , a : str = "", []
        for i, ch in enumerate(__UpperCAmelCase):
            if ch in self.SP_CHAR_MAPPING:
                a : List[Any] = self.SP_CHAR_MAPPING.get(__UpperCAmelCase)
            else:
                a : Union[str, Any] = unicodedata.normalize("NFKC" , __UpperCAmelCase)
            if self.is_whitespace(__UpperCAmelCase):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(__UpperCAmelCase))
        a , a , a : Optional[int] = normalized_text, [], 0
        if self.do_lower_case:
            a : List[str] = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                a : int = token[1:]
            a : Tuple = text[offset:].index(__UpperCAmelCase) + offset
            a : List[str] = start + len(__UpperCAmelCase)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            a : List[Any] = end
        return token_mapping
    # vocab_size property.
    @property
    def __snake_case ( self : str):
        return len(self.vocab)
    # get_vocab: base vocab merged with added tokens.
    def __snake_case ( self : Optional[Any]):
        return dict(self.vocab , **self.added_tokens_encoder)
    # The SentencePieceProcessor is not picklable; drop it on serialization.
    def __getstate__( self : int):
        a : str = self.__dict__.copy()
        a : Optional[Any] = None
        return state
    # Rebuild the SentencePieceProcessor after unpickling.
    def __setstate__( self : str , __UpperCAmelCase : str):
        a : Tuple = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            a : Optional[int] = {}
        a : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)
    # Presumably clean_text: apply the per-character SP_CHAR_MAPPING table.
    def __snake_case ( self : Dict , __UpperCAmelCase : List[Any]):
        return "".join((self.SP_CHAR_MAPPING.get(__UpperCAmelCase , __UpperCAmelCase) for c in text))
    # Presumably _tokenize: SentencePiece encoding (optionally with sampling),
    # then re-splitting pieces at CJK characters, punctuation and digit runs.
    def __snake_case ( self : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : Optional[Any]=64 , __UpperCAmelCase : Any=0.1):
        if self.sp_model_kwargs.get("enable_sampling") is True:
            a : Union[str, Any] = True
        if self.sp_model_kwargs.get("alpha") is not None:
            a : Optional[int] = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            a : Dict = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            a : int = self.sp_model.EncodeAsPieces(__UpperCAmelCase)
        else:
            a : List[Any] = self.sp_model.SampleEncodeAsPieces(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
        a : Optional[Any] = []
        for pi, piece in enumerate(__UpperCAmelCase):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(__UpperCAmelCase) and pi != 0:
                    new_pieces.append(__UpperCAmelCase)
                    continue
                else:
                    continue
            a : Any = 0
            for i, chunk in enumerate(__UpperCAmelCase):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(__UpperCAmelCase) or self.is_punct(__UpperCAmelCase):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(__UpperCAmelCase)
                    a : Optional[int] = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    a : int = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    a : Tuple = i
            if len(__UpperCAmelCase) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces
    # convert_tokens_to_string: join pieces and strip the ▁ markers.
    def __snake_case ( self : List[str] , __UpperCAmelCase : Union[str, Any]):
        a : str = "".join(__UpperCAmelCase).replace(__UpperCAmelCase , " ").strip()
        return out_string
    # ids -> tokens -> string convenience path.
    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Union[str, Any]):
        a : Tuple = self.convert_ids_to_tokens(__UpperCAmelCase)
        a : str = "".join(__UpperCAmelCase).replace(__UpperCAmelCase , " ").strip()
        return out_string
    # _convert_token_to_id with unk fallback.
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Optional[int]):
        return self.vocab.get(__UpperCAmelCase , self.vocab.get(self.unk_token))
    # _convert_id_to_token with unk fallback.
    def __snake_case ( self : Any , __UpperCAmelCase : Tuple):
        return self.reverse_vocab.get(__UpperCAmelCase , self.unk_token)
    # build_inputs_with_special_tokens:
    #   single: [CLS] A [SEP]        pair: [CLS] A [SEP] [SEP] B [SEP]
    # NOTE(review): the None-check/parameter wiring looks inverted vs. upstream
    # (the guard should fire when the SECOND sequence is absent) — confirm.
    def __snake_case ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Union[str, Any]=None):
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        a : int = [self.cls_token_id]
        a : Optional[int] = [self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
    # Offset mapping counterpart of the special-token layout above.
    def __snake_case ( self : List[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Dict=None):
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
    # get_special_tokens_mask: 1 for special positions, 0 for sequence tokens.
    def __snake_case ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : str=None , __UpperCAmelCase : int=False):
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(__UpperCAmelCase)) + [1, 1] + ([0] * len(__UpperCAmelCase)) + [1]
        return [1] + ([0] * len(__UpperCAmelCase)) + [1]
    # create_token_type_ids_from_sequences (segment ids for single/pair inputs).
    def __snake_case ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(__UpperCAmelCase) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(__UpperCAmelCase) + 1) + [1] * (len(__UpperCAmelCase) + 3)
    # is_ch_char: True for characters in the CJK Unified Ideographs range.
    def __snake_case ( self : int , __UpperCAmelCase : Optional[int]):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    # is_alpha: ASCII letters only.
    def __snake_case ( self : Tuple , __UpperCAmelCase : Union[str, Any]):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    # is_punct: fixed ASCII + CJK punctuation set.
    def __snake_case ( self : int , __UpperCAmelCase : List[Any]):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    # is_whitespace: common whitespace plus Unicode space separators (Zs).
    def __snake_case ( self : str , __UpperCAmelCase : Optional[Any]):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(__UpperCAmelCase) == 1:
            a : Optional[Any] = unicodedata.category(__UpperCAmelCase)
            if cat == "Zs":
                return True
        return False
    # load_vocab: read one token per line, mapping token -> line index.
    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any]):
        a : Optional[int] = {}
        with io.open(__UpperCAmelCase , "r" , encoding="utf-8") as f:
            for index, line in enumerate(__UpperCAmelCase):
                a : Tuple = line.rstrip("\n")
                a : Any = int(__UpperCAmelCase)
        return token_to_idx
    # save_vocabulary: write the vocab (sorted by index) and the serialized
    # SentencePiece model next to it.
    def __snake_case ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None):
        a : int = 0
        if os.path.isdir(__UpperCAmelCase):
            a : Union[str, Any] = os.path.join(
                __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            a : Tuple = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(__UpperCAmelCase , "w" , encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda __UpperCAmelCase: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        " Please check that the vocabulary is not corrupted!")
                    a : Union[str, Any] = token_index
                writer.write(token + "\n")
                index += 1
        a : Union[str, Any] = os.path.join(__UpperCAmelCase , "sentencepiece.bpe.model")
        with open(__UpperCAmelCase , "wb") as fi:
            a : Optional[int] = self.sp_model.serialized_model_proto()
            fi.write(__UpperCAmelCase)
        return (vocab_file,)
| 40
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(_a):
    """Agent tool: answer a natural-language question about a document image.

    Wraps a Donut (VisionEncoderDecoder) checkpoint fine-tuned on DocVQA.
    The tool protocol is `encode` (build model inputs) -> `forward` (generate)
    -> `decode` (post-process to the answer string).

    BUG FIXES vs. the scrambled original: the class and its metadata attributes
    all shared colliding placeholder names; `__init__` repeated the parameter
    name `__UpperCAmelCase` for both *args and **kwargs (a SyntaxError);
    `forward` passed undefined `__UpperCAmelCase` where booleans belong; and
    `decode` called the nonexistent `tokenajson` instead of DonutProcessor's
    `token2json`. The class name is restored per its own error message below.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Fail fast: loading the document image requires Pillow.
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build decoder prompt ids for the question and pixel values for the document."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedily generate answer token ids (num_beams=1, <unk> forbidden)."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode(self, outputs):
        """Strip special tokens and parse Donut's tag output into the answer string."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 40
| 1
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# BUG FIX: the module logs via `logger.warning(...)` further down, but the
# logger object was bound to the scrambled name `__lowercase`.
logger = logging.get_logger(__name__)
class _A ( _a ):
    """Whisper-style log-mel audio feature extractor (class body continues below).

    NOTE(review): the base class ``_a`` is presumably ``SequenceFeatureExtractor``
    (imported above) — confirm; the remainder of the class is defined past this
    header.
    """
    # Presumably `model_input_names`: the single input produced by this extractor.
    UpperCAmelCase : Optional[int] = ["""input_features"""]
def __init__( self : Tuple , __UpperCAmelCase : List[Any]=80 , __UpperCAmelCase : Tuple=16000 , __UpperCAmelCase : Optional[Any]=160 , __UpperCAmelCase : Optional[int]=30 , __UpperCAmelCase : Any=400 , __UpperCAmelCase : List[Any]=0.0 , __UpperCAmelCase : Tuple=False , **__UpperCAmelCase : Dict , ):
super().__init__(
feature_size=__UpperCAmelCase , sampling_rate=__UpperCAmelCase , padding_value=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
a : Optional[int] = n_fft
a : Union[str, Any] = hop_length
a : Optional[int] = chunk_length
a : str = chunk_length * sampling_rate
a : List[str] = self.n_samples // hop_length
a : Dict = sampling_rate
a : List[str] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__UpperCAmelCase , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=__UpperCAmelCase , norm="slaney" , mel_scale="slaney" , )
def __snake_case ( self : int , __UpperCAmelCase : np.array):
a : List[str] = spectrogram(
__UpperCAmelCase , window_function(self.n_fft , "hann") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
a : Union[str, Any] = log_spec[:, :-1]
a : List[str] = np.maximum(__UpperCAmelCase , log_spec.max() - 8.0)
a : Dict = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __snake_case ( __UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : List[np.ndarray] , __UpperCAmelCase : float = 0.0):
if attention_mask is not None:
a : str = np.array(__UpperCAmelCase , np.intaa)
a : Dict = []
for vector, length in zip(__UpperCAmelCase , attention_mask.sum(-1)):
a : Any = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
a : Any = padding_value
normed_input_values.append(__UpperCAmelCase)
else:
a : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def __call__( self : List[Any] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[str] = "max_length" , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , **__UpperCAmelCase : Tuple , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''')
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
a : Tuple = isinstance(__UpperCAmelCase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
a : str = is_batched_numpy or (
isinstance(__UpperCAmelCase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
a : Optional[int] = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray):
a : str = np.asarray(__UpperCAmelCase , dtype=np.floataa)
elif isinstance(__UpperCAmelCase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
a : Union[str, Any] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
a : Union[str, Any] = [np.asarray([raw_speech]).T]
a : Any = BatchFeature({"input_features": raw_speech})
# convert into correct format for padding
a : List[str] = self.pad(
__UpperCAmelCase , padding=__UpperCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
a : int = self.zero_mean_unit_var_norm(
padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
a : int = np.stack(padded_inputs["input_features"] , axis=0)
# make sure list is in array format
a : Optional[Any] = padded_inputs.get("input_features").transpose(2 , 0 , 1)
a : Optional[int] = [self._np_extract_fbank_features(__UpperCAmelCase) for waveform in input_features[0]]
if isinstance(input_features[0] , __UpperCAmelCase):
a : Union[str, Any] = [np.asarray(__UpperCAmelCase , dtype=np.floataa) for feature in input_features]
else:
a : Union[str, Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
a : Union[str, Any] = padded_inputs["attention_mask"][:, :: self.hop_length]
if return_tensors is not None:
a : int = padded_inputs.convert_to_tensors(__UpperCAmelCase)
return padded_inputs
def __snake_case ( self : List[Any]):
a : Tuple = copy.deepcopy(self.__dict__)
a : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 40
|
"""simple docstring"""
from __future__ import annotations
class _A :
    """
    Simple XOR stream cipher over text.

    Restored from obfuscated source: parameter names were duplicated (a
    SyntaxError), every method shared one name, and ``ord`` was applied to the
    whole string instead of each character. Method names follow the internal
    call sites (``encrypt_string``/``decrypt_string``).
    """

    def __init__(self, key: int = 0):
        # Fallback key used when a method is called with key 0/None.
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        """XOR each character of *content* with *key*; return list of chars."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        """Inverse of :meth:`encrypt` (XOR is its own inverse)."""
        assert isinstance(key, int) and isinstance(content, list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        """XOR-encrypt *content* and return the result as a single string."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        """Inverse of :meth:`encrypt_string`."""
        assert isinstance(key, int) and isinstance(content, str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        """Encrypt *file* line by line into ``encrypt.out``; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False
        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        """Decrypt *file* line by line into ``decrypt.out``; False on I/O error."""
        assert isinstance(file, str) and isinstance(key, int)
        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """
    Treap node: binary-search-tree ordering by ``value``, heap ordering by a
    random ``prior``. Restored from obfuscated source (assignment targets were
    mangled and the class name did not match its ``Node(...)`` call sites).
    """

    def __init__(self, value: int | None = None):
        self.value = value
        # Random heap priority makes the treap balanced in expectation.
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        # Pre-order rendering: value, then left subtree, then right subtree.
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap at *value*: return (subtree with keys <= value, rest).

    Restored from obfuscated source (duplicate parameter names and mangled
    tuple-assignment targets).
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root belongs to the right part; recurse into the left subtree.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Root belongs to the left part; recurse into the right subtree.
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps; every key in *left* must be <= every key in *right*.

    Restored from obfuscated source (duplicate parameter names and mangled
    assignment targets).
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # Left root has the higher (min-ordered) priority: it stays on top.
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    """Insert *value* into the treap rooted at *root* and return the new root.

    Restored from obfuscated source: the mangled targets are rebuilt as the
    classic split / merge insertion.
    """
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes equal to *value* and return the new root.

    Splits out the band (value-1, value] and merges the remainder back
    together. Restored from obfuscated source.
    """
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted (in-order) order, comma-separated.

    Restored from obfuscated source (the recursive calls referenced the
    original ``inorder`` name).
    """
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply whitespace-separated ``+N`` / ``-N`` commands to the treap.

    ``+N`` inserts N, ``-N`` erases N; anything else prints an error.
    Restored from obfuscated source (duplicate parameter names).
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    """Interactive driver: read commands from stdin until 'q'.

    Restored from obfuscated source (mangled assignment targets; the
    ``__main__`` guard called ``main`` while the definition had been renamed).
    """
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. ")
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 40
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    """Reset the registry of already-emitted deprecation warnings so each test
    sees the warning again. Restored from obfuscated source: the parameter had
    been renamed away from ``monkeypatch`` (breaking pytest injection)."""
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def mock_hfh(monkeypatch):
    """Patch ``datasets.inspect.huggingface_hub`` with a stub that lists a few
    canned metrics, avoiding network access. Restored from obfuscated source:
    the nested class and method names were mangled so the stub never resolved."""

    class MetricMock:
        def __init__(self, metric_id):
            # Hub metric objects expose their identifier as ``.id``.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each metric entry point must emit the FutureWarning pointing users to
    the ``evaluate`` library. Restored from obfuscated source: the signature
    had five identically-named parameters (a SyntaxError) and the warning
    class reference was lost."""
    if "tmp_path" in args:
        # Substitute the placeholder string with the real tmp_path fixture value.
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 40
| 1
|
"""simple docstring"""
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of *n* (Project Euler problem 3).

    Raises TypeError if *n* is not castable to int, ValueError if n <= 0.
    Restored from obfuscated source: the working variable assignments had been
    mangled away, leaving ``n`` and the accumulator undefined.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    prime = 1
    i = 2
    while i * i <= n:
        # Divide out each factor completely; the last one recorded is largest.
        while n % i == 0:
            prime = i
            n //= i
        i += 1
    if n > 1:
        # Remaining n is itself a prime larger than any factor found so far.
        prime = n
    return int(prime)


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 40
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example (Conway's Game of Life).
# Restored name: the __main__ guard references GLIDER, but both constants had
# been renamed to the same placeholder.
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example (period-2 oscillator).
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    """Compute the next Game of Life generation for a grid of 0/1 cells.

    Restored from obfuscated source: the accumulator variables
    (``next_generation``, ``neighbour_count``, ...) had been mangled away.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    """Render *frames* successive generations as greyscale PIL images.

    Live cells render black (0), dead cells white (255). Restored from
    obfuscated source: the ``colour``/pixel assignments had been mangled into
    no-ops. NOTE(review): the x/y indexing (``cells[y][x]``) only works for
    square grids — TODO confirm against the upstream version.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images


if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Return indices [i, j] of two entries in sorted *nums* summing to *target*.

    Classic two-pointer scan; returns [] when no pair exists. Restored from
    obfuscated source: both parameters shared one name (a SyntaxError).
    """
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            # Sum too small: advance the left pointer.
            i = i + 1
        else:
            # Sum too large: retreat the right pointer.
            j = j - 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 40
|
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the Project Euler 43 substring-divisibility property of a
    0-9 pandigital digit tuple.

    d2d3d4 % 2, d3d4d5 % 3, d4d5d6 % 5, then d5..d10 windows against
    7, 11, 13, 17. Restored from obfuscated source: the loop enumerated the
    digit tuple instead of the prime list (an IndexError).
    """
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Sum all 0..n-1 pandigital numbers with the substring-divisibility
    property (Project Euler 43).

    Restored from obfuscated source: ``map(str, num)`` and the
    ``is_substring_divisible(num)`` filter had been replaced by references to
    the integer parameter (a TypeError at runtime).
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 40
| 1
|
"""simple docstring"""
import argparse
import copy
def generate_neighbours(path):
    """Parse a TSP edge-list file into an adjacency dict.

    Each line is ``node_a node_b distance``; the result maps each node to a
    list of ``[neighbour, distance]`` pairs (distances kept as strings, as the
    downstream code expects). Restored from obfuscated source: the ``_list``
    temporaries and dict insertions had been mangled away.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first character
    of the file at *path*.

    Returns ``(tour, total_distance)`` where the tour ends back at the start
    node. Restored from obfuscated source: every working variable
    (``first_solution``, ``visiting``, ``minim``, ...) had been mangled away.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # Sentinel "infinite" distance; also charged on the final hop and
        # subtracted again below.
        minim = 10_000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    # Find the edge back to the start node to replace the sentinel cost.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10_000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all 2-swap neighbours of *solution*, each with its tour length
    appended as the last element, sorted by that length.

    Restored from obfuscated source: the swap assignments, distance
    accumulator and sort key lambda had been mangled away.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            # Swap the two interior cities to form a candidate tour.
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for *iters* iterations with a tabu list of length *size*.

    Returns ``(best_solution_ever, best_cost)``. Restored from obfuscated
    source: duplicated parameter names (a SyntaxError) and mangled working
    variables.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            # Identify the first position where the candidate differs.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                # Drop the appended cost element to recover the tour itself.
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: try the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """Wire the pipeline together from parsed CLI args and print the result.

    Restored from obfuscated source: the intermediate results had been
    assigned to mangled names.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''')
if __name__ == "__main__":
    # Restored: the parser was assigned to a mangled name while every
    # subsequent line referenced `parser` (a NameError).
    parser = argparse.ArgumentParser(description="""Tabu Search""")
    parser.add_argument(
        """-f""",
        """--File""",
        type=str,
        help="""Path to the file containing the data""",
        required=True,
    )
    parser.add_argument(
        """-i""",
        """--Iterations""",
        type=int,
        help="""How many iterations the algorithm should perform""",
        required=True,
    )
    parser.add_argument(
        """-s""", """--Size""", type=int, help="""Size of the tabu list""", required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( _a ,unittest.TestCase ):
    """Fast tests for the Kandinsky 2.2 ControlNet pipeline.

    NOTE(review): identifiers here look machine-mangled — every class
    attribute is named ``UpperCAmelCase`` (later assignments shadow earlier
    ones), every method/property is named ``__snake_case`` (only the last
    definition of each survives), and several bodies assign to a throwaway
    ``a`` while later lines reference the original names (``model``,
    ``components``, ``inputs``, ``unet``, ...). The base class ``_a`` is
    presumably ``PipelineTesterMixin`` (imported above) — confirm against the
    upstream diffusers test file before relying on this block.
    """

    # Mangled class attributes: presumably pipeline_class / params /
    # batch_params / required_optional_params / test flags — TODO confirm.
    UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline
    UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Dict = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    UpperCAmelCase : Optional[int] = False

    # Dummy-model dimension properties (names mangled; referenced below as
    # time_input_dim / block_out_channels_a / text_embedder_hidden_size /
    # cross_attention_dim).
    @property
    def __snake_case ( self : Optional[Any]):
        return 32

    @property
    def __snake_case ( self : Dict):
        return 32

    @property
    def __snake_case ( self : Dict):
        return self.time_input_dim

    @property
    def __snake_case ( self : Any):
        return self.time_input_dim * 4

    @property
    def __snake_case ( self : str):
        return 100

    @property
    def __snake_case ( self : str):
        # Builds a tiny UNet2DConditionModel with deterministic weights.
        torch.manual_seed(0)
        a : str = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        a : Dict = UNetaDConditionModel(**__UpperCAmelCase)
        return model

    @property
    def __snake_case ( self : str):
        # Keyword arguments for the dummy VQ movq model.
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def __snake_case ( self : Union[str, Any]):
        torch.manual_seed(0)
        a : Dict = VQModel(**self.dummy_movq_kwargs)
        return model

    def __snake_case ( self : Optional[Any]):
        # Assemble the pipeline components dict (unet, scheduler, movq).
        a : Optional[Any] = self.dummy_unet
        a : int = self.dummy_movq
        a : str = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , )
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0):
        # Build deterministic dummy inputs (embeds, hint image, generator).
        a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            __UpperCAmelCase)
        # create hint
        a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        if str(__UpperCAmelCase).startswith("mps"):
            a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase)
        else:
            a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : str = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def __snake_case ( self : Dict):
        # Smoke test: 2-step CPU inference must reproduce the reference slice.
        a : str = "cpu"
        a : Tuple = self.get_dummy_components()
        a : Dict = self.pipeline_class(**__UpperCAmelCase)
        a : Optional[int] = pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase))
        a : Any = output.images
        a : Any = pipe(
            **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0]
        a : Union[str, Any] = image[0, -3:, -3:, -1]
        a : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : Tuple = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 ControlNet pipeline.

    NOTE(review): method names here look machine-mangled (both methods are
    named ``__snake_case`` — presumably ``tearDown`` and the actual test —
    so only the last definition survives) and several assignments target a
    throwaway ``a`` while later lines reference the original names. Confirm
    against the upstream diffusers test file.
    """

    def __snake_case ( self : Optional[int]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : List[str]):
        # Full prior + controlnet-depth run, compared against a stored image.
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        a : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # Normalize the depth hint to [0, 1] and add a batch dimension.
        a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0
        a : str = hint.permute(2 , 0 , 1).unsqueeze(0)
        a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(__UpperCAmelCase)
        a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        a : int = pipeline.to(__UpperCAmelCase)
        pipeline.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Tuple = "A robot, 4k photo"
        a : Any = torch.Generator(device="cuda").manual_seed(0)
        a , a : int = pipe_prior(
            __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        a : str = torch.Generator(device="cuda").manual_seed(0)
        a : Union[str, Any] = pipeline(
            image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , )
        a : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
| 40
| 1
|
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Return the fractional part of *number*, rounded to *digit_amount*
    decimal places when ``digit_amount > 0`` (unrounded otherwise).

    Restored from obfuscated source: both parameters shared one name
    (a SyntaxError) and ``int()`` was applied to the wrong argument.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.3_45, 1))
    print(decimal_isolate(35.3_45, 2))
    print(decimal_isolate(35.3_45, 3))
    print(decimal_isolate(-14.7_89, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.1_23, 1))
    print(decimal_isolate(-14.1_23, 2))
    print(decimal_isolate(-14.1_23, 3))
| 40
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    """Build an ``ASTConfig`` matching *model_name* and attach id/label maps
    downloaded from the ``huggingface/label-files`` dataset repo.

    Restored from obfuscated source: the config attribute assignments had
    been mangled into no-ops and the id2label comprehension cast the wrong
    variable. NOTE(review): the exact stride/max_length attribute names were
    reconstructed from the upstream conversion script — TODO confirm.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        pass  # default strides
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name):
    """Map one original AST checkpoint parameter name to its Transformers
    equivalent via ordered substring replacements.

    Restored from obfuscated source: the function body referenced ``name``
    but the parameter had been renamed away (a NameError), and the chained
    reassignments were mangled.
    """
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    """Rewrite an original AST state dict in place to Transformers naming,
    splitting each fused qkv projection into separate query/key/value tensors.

    Restored from obfuscated source: both parameters shared one name (a
    SyntaxError) and the destination dict keys had been mangled away; the
    target key layout follows the standard ViT-family conversion scripts.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            # e.g. "module.v.blocks.<layer>.attn.qkv.weight" -> layer index at [3]
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    """Drop the original classification-head tensors that have no counterpart
    in the converted model (mutates *state_dict* in place).

    Restored from obfuscated source: the ignore list and the ``pop`` call had
    been mangled (the code popped the whole-dict parameter as a key).
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        # pop with a default so missing keys are tolerated
        state_dict.pop(k, None)
@torch.no_grad()
def lowercase(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original AST checkpoint to the HF format and verify its logits.

    Args:
        model_name: one of the known AST checkpoint names (keys of the URL map below).
        pytorch_dump_folder_path: where to save the converted model, or None to skip saving.
        push_to_hub: if True, upload model and feature extractor to the hub under MIT/.

    Raises:
        ValueError: if ``model_name`` is unknown or the converted model's logits
            do not match the reference values.
    """
    # NOTE(review): get_audio_spectrogram_transformer_config, remove_keys,
    # convert_state_dict, ASTForAudioClassification, ASTFeatureExtractor,
    # load_dataset, hf_hub_download and torchaudio come from elsewhere in the
    # original conversion script — confirm they are importable in context.
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    # Reference first-three logits per checkpoint (computed with the original repo).
    expected_slices = {
        "ast-finetuned-audioset-10-10-0.4593": [-0.8760, -7.0042, -8.6602],
        "ast-finetuned-audioset-10-10-0.450": [-1.1986, -7.0903, -8.2718],
        "ast-finetuned-audioset-10-10-0.448": [-2.6128, -8.0080, -9.4344],
        "ast-finetuned-audioset-10-10-0.448-v2": [-1.5080, -7.4534, -8.8917],
        "ast-finetuned-audioset-12-12-0.447": [-0.5050, -6.5833, -8.0843],
        "ast-finetuned-audioset-14-14-0.443": [-0.3826, -7.0336, -8.2413],
        "ast-finetuned-audioset-16-16-0.442": [-1.2113, -6.9101, -8.3470],
        "ast-finetuned-speech-commands-v2": [6.1589, -8.0566, -8.7984],
    }
    if model_name not in expected_slices:
        raise ValueError("Unknown model name")
    expected_slice = torch.tensor(expected_slices[model_name])
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # NOTE(review): the converter function is defined as `lowercase` in this file
    # (originally `convert_audio_spectrogram_transformer_checkpoint`) — confirm name.
    lowercase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowercase = 128022
__lowercase = 128028
@require_sentencepiece
class _A ( _a ,unittest.TestCase ):
    """Unit tests for the MaMaaa (M2M100-style) sentencepiece tokenizer.

    NOTE(review): every method below is named ``__snake_case``, so each
    definition overwrites the previous one and only the last survives on the
    class; likewise many locals were mangled to ``a``/``__UpperCAmelCase`` and
    no longer resolve. Presumably these were distinct ``setUp``/``test_*``
    names before the file was mangled — TODO confirm against the original.
    """

    # Tokenizer class under test and tester-mixin switches (names mangled).
    UpperCAmelCase : List[Any] = MaMaaaTokenizer
    UpperCAmelCase : List[str] = False
    UpperCAmelCase : Tuple = False
    UpperCAmelCase : str = True

    def __snake_case ( self : int):
        """Build a tiny vocab + sentencepiece model in a temp dir and save the tokenizer."""
        super().setUp()
        a : Any = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        a : Any = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase))))
        a : List[str] = Path(self.tmpdirname)
        save_json(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(__UpperCAmelCase , save_dir / VOCAB_FILES_NAMES["spm_file"])
        a : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def __snake_case ( self : str , **__UpperCAmelCase : str):
        """Factory: load a tokenizer from the temp dir with extra kwargs."""
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)

    def __snake_case ( self : int , __UpperCAmelCase : List[Any]):
        """Return the (input_text, output_text) pair used by the common tests."""
        return (
            "This is a test",
            "This is a test",
        )

    def __snake_case ( self : Tuple):
        """Token <-> id round trip for "</s>" (expected id 0)."""
        a : int = "</s>"
        a : Dict = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase)

    def __snake_case ( self : List[str]):
        """Sanity-check ordering and total size of the vocab."""
        a : Tuple = self.get_tokenizer()
        a : Optional[Any] = list(tokenizer.get_vocab().keys())
        self.assertEqual(vocab_keys[0] , "</s>")
        self.assertEqual(vocab_keys[1] , "<unk>")
        self.assertEqual(vocab_keys[-1] , "<s>")
        self.assertEqual(len(__UpperCAmelCase) , tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def __snake_case ( self : str):
        pass

    def __snake_case ( self : Optional[int]):
        """Full tokenize / ids / detokenize round trip on a short sentence."""
        a : Tuple = self.get_tokenizer()
        a : Dict = tokenizer.tokenize("This is a test")
        self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [2, 3, 4, 5, 6] , )
        a : str = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
        a : Optional[Any] = tokenizer.convert_tokens_to_string(__UpperCAmelCase)
        self.assertEqual(__UpperCAmelCase , "This is a test")

    @slow
    def __snake_case ( self : Tuple):
        """Integration test against the published facebook/m2m100_418M checkpoint."""
        # fmt: off
        a : Optional[int] = {"input_ids": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    """Integration tests against the published facebook/m2m100_418M tokenizer.

    NOTE(review): all methods are named ``__snake_case`` (each def overwrites
    the previous), class attributes were mangled to ``UpperCAmelCase`` (the
    tests reference ``checkpoint_name``/``src_text``/``tgt_text``/
    ``expected_src_tokens``), and ``EN_CODE``/``FR_CODE`` are presumably the
    module-level language-code constants — TODO confirm upstream.
    """

    # Checkpoint name, source texts, target texts and expected source token ids.
    UpperCAmelCase : Union[str, Any] = """facebook/m2m100_418M"""
    UpperCAmelCase : Union[str, Any] = [
        """In my opinion, there are two levels of response from the French government.""",
        """NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
    ]
    UpperCAmelCase : Dict = [
        """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
        """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
    ]
    # fmt: off
    UpperCAmelCase : List[str] = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2]
    # fmt: on

    @classmethod
    def __snake_case ( cls : List[str]):
        """Load the shared tokenizer once for the whole class (setUpClass-style)."""
        a : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr")
        a : List[str] = 1
        return cls

    def __snake_case ( self : Union[str, Any]):
        """Language-token ids for a few languages."""
        self.assertEqual(self.tokenizer.get_lang_id("ar") , 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en") , 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro") , 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr") , 128063)

    def __snake_case ( self : Tuple):
        """Vocab size, "<unk>" id, and presence of the "en" language token."""
        a : Union[str, Any] = self.tokenizer.get_vocab()
        self.assertEqual(len(__UpperCAmelCase) , self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"] , 3)
        self.assertIn(self.tokenizer.get_lang_token("en") , __UpperCAmelCase)

    def __snake_case ( self : List[Any]):
        """Encoding the first source sentence yields the expected token ids."""
        a : int = "en"
        a : int = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase)

    def __snake_case ( self : Any):
        """Decoding skips language-code and EOS special tokens."""
        self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids)
        # fmt: off
        a : List[Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        a : str = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
        a : Optional[Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase)
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
        self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase)

    def __snake_case ( self : List[str]):
        """Saving and reloading preserves the language-token mapping."""
        a : Optional[int] = tempfile.mkdtemp()
        a : Union[str, Any] = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(__UpperCAmelCase)
        a : int = MaMaaaTokenizer.from_pretrained(__UpperCAmelCase)
        self.assertDictEqual(new_tok.lang_token_to_id , __UpperCAmelCase)

    @require_torch
    def __snake_case ( self : Optional[int]):
        """Batch encode + shift_tokens_right matches the fairseq reference batch."""
        a : Dict = "en"
        a : List[str] = "fr"
        a : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="pt")
        a : Any = shift_tokens_right(
            batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
        for k in batch:
            a : str = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]

    @require_torch
    def __snake_case ( self : Union[str, Any]):
        """Changing src_lang updates the prefix/suffix special tokens."""
        a : Dict = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
        a : List[Any] = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])

    @require_torch
    def __snake_case ( self : Optional[int]):
        """Switching to target/input mode swaps the prefix language token accordingly."""
        a : Any = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
        a : List[Any] = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

    @require_torch
    def __snake_case ( self : Any):
        """_build_translation_inputs produces ids, mask and forced BOS for the target lang."""
        a : str = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar")
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            } , )
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( _a ,_a ,_a ,unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for StableDiffusionInpaintPipeline.

    NOTE(review): all methods are named ``__snake_case`` (each def overwrites
    the previous) and locals were mangled to ``a``/``__UpperCAmelCase``;
    presumably these were ``get_dummy_components``/``get_dummy_inputs``/
    ``test_*`` upstream — TODO confirm.
    """

    # Pipeline class under test and the parameter sets exercised by the mixins.
    UpperCAmelCase : str = StableDiffusionInpaintPipeline
    UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCAmelCase : Union[str, Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCAmelCase : int = frozenset([] )

    def __snake_case ( self : Dict):
        """Build tiny deterministic UNet/VAE/CLIP components for fast tests."""
        torch.manual_seed(0)
        a : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
        a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase)
        torch.manual_seed(0)
        a : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        a : Any = CLIPTextModel(__UpperCAmelCase)
        a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    # NOTE(review): the two parameters below share the mangled name
    # ``__UpperCAmelCase`` (a SyntaxError as written); upstream they were
    # presumably ``device`` and ``seed=0`` — TODO confirm.
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0):
        """Build a deterministic prompt/image/mask input dict for the pipeline."""
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0]
        a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64))
        a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
        if str(__UpperCAmelCase).startswith("mps"):
            a : Tuple = torch.manual_seed(__UpperCAmelCase)
        else:
            a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def __snake_case ( self : List[str]):
        """End-to-end inpaint on tiny components: check output shape and pixel slice."""
        a : Union[str, Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a : Tuple = self.get_dummy_components()
        a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase)
        a : int = sd_pipe.to(__UpperCAmelCase)
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Any = self.get_dummy_inputs(__UpperCAmelCase)
        a : Optional[int] = sd_pipe(**__UpperCAmelCase).images
        a : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def __snake_case ( self : str):
        """Batched single-image inference matches unbatched within tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow GPU integration tests for stabilityai/stable-diffusion-2-inpainting.

    NOTE(review): all methods are named ``__snake_case`` (each def overwrites
    the previous); presumably ``tearDown`` and ``test_*`` upstream — TODO confirm.
    """

    def __snake_case ( self : Union[str, Any]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Dict):
        """fp32 inpaint run compared against a reference image within 9e-3."""
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        a : Tuple = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase)
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Any = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : str = torch.manual_seed(0)
        a : Union[str, Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def __snake_case ( self : Any):
        """fp16 inpaint run compared against a reference image within 5e-1."""
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Any = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Dict = torch.manual_seed(0)
        a : List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : Optional[Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def __snake_case ( self : int):
        """fp16 + attention slicing + sequential CPU offload stays under ~2.65 GB VRAM."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler")
        a : int = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Optional[int] = torch.manual_seed(0)
        a : str = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
        a : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 40
| 1
|
"""simple docstring"""
def lowercase(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v**2 of a body.

    Args:
        mass: mass of the body; must be non-negative.
        velocity: velocity of the body (sign is ignored via abs()).

    Returns:
        The kinetic energy in consistent units.

    Raises:
        ValueError: if ``mass`` is negative.

    >>> lowercase(10, 10)
    500.0
    >>> lowercase(0, 5)
    0.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod(verbose=True)
| 40
|
"""simple docstring"""
def lowercase(string: str) -> bool:
    """Return True if ``string`` is an isogram (no repeated letters, case-insensitive).

    Args:
        string: the word to check; must contain alphabetic characters only.

    Returns:
        True when no letter occurs more than once (an empty string is an isogram).

    Raises:
        ValueError: if the string contains non-alphabetic characters.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    # Compare total letter count with the count of distinct (lowercased) letters.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    # NOTE(review): the isogram checker is defined as `lowercase` in this file.
    isogram = lowercase(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase = {
"""configuration_mobilebert""": [
"""MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileBertConfig""",
"""MobileBertOnnxConfig""",
],
"""tokenization_mobilebert""": ["""MobileBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""MobileBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileBertForMaskedLM""",
"""MobileBertForMultipleChoice""",
"""MobileBertForNextSentencePrediction""",
"""MobileBertForPreTraining""",
"""MobileBertForQuestionAnswering""",
"""MobileBertForSequenceClassification""",
"""MobileBertForTokenClassification""",
"""MobileBertLayer""",
"""MobileBertModel""",
"""MobileBertPreTrainedModel""",
"""load_tf_weights_in_mobilebert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileBertForMaskedLM""",
"""TFMobileBertForMultipleChoice""",
"""TFMobileBertForNextSentencePrediction""",
"""TFMobileBertForPreTraining""",
"""TFMobileBertForQuestionAnswering""",
"""TFMobileBertForSequenceClassification""",
"""TFMobileBertForTokenClassification""",
"""TFMobileBertMainLayer""",
"""TFMobileBertModel""",
"""TFMobileBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet.

    Attributes:
        batch_size: number of rows per generated Arrow record batch.
        columns: optional subset of columns to read; None reads all columns.
        features: optional explicit feature schema; None infers it from the files.
    """

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None
class _A(datasets.ArrowBasedBuilder):
    """Arrow-based builder that streams Parquet files into Arrow tables."""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        """Dataset info; features may be None and are then inferred from the files."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files.

        Raises:
            ValueError: if no data files were specified.
        """
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast an Arrow table to the declared features, if any."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, table) pairs, one table per Parquet record batch.

        Raises:
            ValueError: if requested columns mismatch the declared features, or
                a file fails to read (after logging the failing file).
        """
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
| 40
| 1
|
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# NOTE(review): three distinct constants were all renamed to ``__lowercase``,
# so each assignment overwrites the previous one. The functions below expect
# the frequency table, an ``ETAOIN`` ordering string and a ``LETTERS``
# alphabet — restore those names.
__lowercase = {
    """E""": 12.70,
    """T""": 9.06,
    """A""": 8.17,
    """O""": 7.51,
    """I""": 6.97,
    """N""": 6.75,
    """S""": 6.33,
    """H""": 6.09,
    """R""": 5.99,
    """D""": 4.25,
    """L""": 4.03,
    """C""": 2.78,
    """U""": 2.76,
    """M""": 2.41,
    """W""": 2.36,
    """F""": 2.23,
    """G""": 2.02,
    """Y""": 1.97,
    """P""": 1.93,
    """B""": 1.29,
    """V""": 0.98,
    """K""": 0.77,
    """J""": 0.15,
    """X""": 0.15,
    """Q""": 0.10,
    """Z""": 0.07,
}
# English letters ordered most→least frequent (originally ``ETAOIN``)
__lowercase = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
# plain A-Z alphabet (originally ``LETTERS``)
__lowercase = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def lowercase ( A_ )-> dict[str, int]:
    """Count occurrences of each uppercase letter A-Z in *A_*.

    Fixes a NameError: the count dict was bound to a throwaway local ``a``
    while the loop read ``letter_count``/``message``/``LETTERS`` (all
    undefined here). Membership is now tested against the dict itself — the
    same A-Z key set, with O(1) lookup and no module-global dependency.

    :param A_: arbitrary text; counting is case-insensitive.
    :return: mapping of each letter "A".."Z" to its count in ``A_``.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in A_.upper():
        if letter in letter_count:
            letter_count[letter] += 1
    return letter_count
def lowercase ( A_ )-> str:
    """Return the first element of *A_* (helper used as a sort key).

    Fixes a NameError: the body returned ``x[0]`` although the parameter is
    named ``A_``.
    """
    return A_[0]
def lowercase ( A_ )-> str:
    """Return the 26 letters A-Z ordered from most to least frequent in *A_*.

    Ties (equal counts) are broken by reverse position in the standard
    English-frequency string "ETAOIN...", then the per-frequency groups are
    concatenated from highest to lowest count.

    Fixes obfuscation damage: the original referenced undefined names
    (``get_letter_count``, ``ETAOIN``, ``LETTERS``, ``freq_to_letter_str``,
    ``freq_pairs``) and passed the whole message where a letter, a sort key
    and ``True`` were intended. The constants are inlined so the function is
    self-contained.
    """
    etaoin = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
    # per-letter counts of A_ (inlined letter counting)
    letter_to_freq = {letter: 0 for letter in string.ascii_uppercase}
    for symbol in A_.upper():
        if symbol in letter_to_freq:
            letter_to_freq[symbol] += 1
    # group letters by their frequency
    freq_to_letter: dict[int, list[str]] = {freq: [] for freq in letter_to_freq.values()}
    for letter in string.ascii_uppercase:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    # within a group, order by reverse ETAOIN position, then join to a string
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=etaoin.find , reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    # concatenate groups from the highest frequency down
    freq_pairs = sorted(freq_to_letter_str.items() , key=lambda pair: pair[0] , reverse=True)
    return "".join(pair[1] for pair in freq_pairs)
def lowercase ( A_ )-> int:
    """Score 0-6: how closely *A_*'s letter-frequency order matches English.

    NOTE(review): obfuscation damage — the two locals were renamed to ``a``
    while later lines read ``freq_order`` and ``match_score``, and
    ``get_frequency_order`` / ``ETAOIN`` are not defined at module level in
    this file; as written this raises NameError. Restore
    ``freq_order = get_frequency_order(A_)`` and ``match_score = 0``.
    """
    a : int = get_frequency_order(A_)
    a : Any = 0
    # +1 for each of English's six most common letters among A_'s top six
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    # +1 for each of English's six least common letters among A_'s bottom six
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
# Canonical DPR checkpoints mapped to their hosted config files
# (originally ``DPR_PRETRAINED_CONFIG_ARCHIVE_MAP``; the target name was
# obfuscated to ``__lowercase``).
__lowercase = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}
class _A ( _a ):
    """BERT-style configuration for DPR encoder/reader models.

    Fixes a SyntaxError: every ``__init__`` parameter shared the name
    ``__UpperCAmelCase`` (duplicate argument names are illegal), and the body
    bound each value to a throwaway local ``a`` instead of an instance
    attribute. Parameter names are restored from the right-hand sides of the
    original assignments (matching DPRConfig in `transformers`).
    """

    # model identifier used by the config framework (value was always "dpr")
    UpperCAmelCase : str = """dpr"""

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # projection_dim == 0 means no extra projection on top of the encoder
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure: submodule name -> public names it provides.
# Fixes: the original rebound a single name (``__lowercase``) for every
# backend-specific list (each assignment overwrote the previous one) and then
# passed an undefined ``_import_structure`` to ``_LazyModule`` without ever
# installing the lazy module into ``sys.modules``.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    # Install the lazy module so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
class _A :
    """Max-Fenwick tree: point update and range-max query over a fixed array.

    NOTE(review): obfuscation damage renders the class non-functional —
    ``__init__`` binds a local ``a`` instead of ``self.size``/``self.arr``/
    ``self.tree``; every method is named ``__snake_case`` (later defs shadow
    earlier ones, while bodies call ``self.get_prev``/``self.get_next``); and
    the two-parameter methods repeat the parameter name ``__UpperCAmelCase``,
    which is a SyntaxError. Restore the original names before use.
    """
    def __init__( self : int , __UpperCAmelCase : int):
        # presumably self.size / self.arr / self.tree — targets lost
        a : Tuple = size
        a : Dict = [0] * size
        a : Optional[int] = [0] * size
    # originally get_next(index): next tree node covering a larger range
    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        return index | (index + 1)
    # originally get_prev(index): rightmost index below this node's range
    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        return (index & (index + 1)) - 1
    # originally update(index, value): set arr[index], refresh tree maxima
    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        a : Union[str, Any] = value
        while index < self.size:
            a : Dict = self.get_prev(__UpperCAmelCase) + 1
            if current_left_border == index:
                a : Optional[int] = value
            else:
                a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.get_next(__UpperCAmelCase)
    # originally query(left, right): max of arr[left:right] (right exclusive)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        right -= 1 # Because of right is exclusive
        a : List[str] = 0
        while left <= right:
            a : Dict = self.get_prev(__UpperCAmelCase)
            if left <= current_left:
                a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
                a : Optional[Any] = current_left
            else:
                a : List[str] = max(__UpperCAmelCase , self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( _a ,unittest.TestCase ):
    """Fast CPU tests for KandinskyVaaControlnetPipeline with tiny dummy models.

    NOTE(review): obfuscation damage — the class attributes all share the
    name ``UpperCAmelCase`` (only the last assignment survives; originally
    pipeline_class / params / batch_params / required kwargs / a bool flag),
    every method is named ``__snake_case``, locals were renamed to ``a``
    while later lines read the original names, and ``get_dummy_inputs``
    repeats the parameter name ``__UpperCAmelCase`` (a SyntaxError).
    """
    UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline
    UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Dict = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    UpperCAmelCase : Optional[int] = False
    # the five properties below were presumably text_embedder_hidden_size,
    # time_input_dim, block_out_channels_a, time_embed_dim and
    # cross_attention_dim — names lost, only the last survives
    @property
    def __snake_case ( self : Optional[Any]):
        return 32
    @property
    def __snake_case ( self : Dict):
        return 32
    @property
    def __snake_case ( self : Dict):
        return self.time_input_dim
    @property
    def __snake_case ( self : Any):
        return self.time_input_dim * 4
    @property
    def __snake_case ( self : str):
        return 100
    # originally dummy_unet: tiny UNet2DConditionModel with a fixed seed
    @property
    def __snake_case ( self : str):
        torch.manual_seed(0)
        a : str = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        a : Dict = UNetaDConditionModel(**__UpperCAmelCase)
        return model
    # originally dummy_movq_kwargs: VQModel config for a tiny decoder
    @property
    def __snake_case ( self : str):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    # originally dummy_movq: seeded VQModel built from the kwargs above
    @property
    def __snake_case ( self : Union[str, Any]):
        torch.manual_seed(0)
        a : Dict = VQModel(**self.dummy_movq_kwargs)
        return model
    # originally get_dummy_components(): unet + DDIM scheduler + movq
    def __snake_case ( self : Optional[Any]):
        a : Optional[Any] = self.dummy_unet
        a : int = self.dummy_movq
        a : str = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , )
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    # originally get_dummy_inputs(device, seed=0)
    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0):
        a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            __UpperCAmelCase)
        # create hint
        a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        if str(__UpperCAmelCase).startswith("mps"):
            a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase)
        else:
            a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : str = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    # originally the end-to-end smoke test comparing a 3x3 output slice
    def __snake_case ( self : Dict):
        a : str = "cpu"
        a : Tuple = self.get_dummy_components()
        a : Dict = self.pipeline_class(**__UpperCAmelCase)
        a : Optional[int] = pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase))
        a : Any = output.images
        a : Any = pipe(
            **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0]
        a : Union[str, Any] = image[0, -3:, -3:, -1]
        a : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : Tuple = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """GPU integration test: Kandinsky 2.2 controlnet-depth vs. a reference image.

    NOTE(review): obfuscation damage — locals were renamed to ``a`` while
    later lines read the original names (``hint``, ``pipe_prior``,
    ``pipeline``, ``output``, ``image``, unpacked embeds), so the test cannot
    run as written; the two methods also share one name.
    """
    def __snake_case ( self : Optional[int]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : List[str]):
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        a : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # normalise the hint to [0, 1] and reshape to NCHW
        a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0
        a : str = hint.permute(2 , 0 , 1).unsqueeze(0)
        a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(__UpperCAmelCase)
        a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        a : int = pipeline.to(__UpperCAmelCase)
        pipeline.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Tuple = "A robot, 4k photo"
        a : Any = torch.Generator(device="cuda").manual_seed(0)
        a , a : int = pipe_prior(
            __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        a : str = torch.Generator(device="cuda").manual_seed(0)
        a : Union[str, Any] = pipeline(
            image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , )
        a : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
| 40
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
    """Unit tests for ``knapsack.knapsack``.

    NOTE(review): obfuscation damage — the fixture locals (capacity, values,
    weights, count) were all renamed to ``a`` and the call arguments
    collapsed to the undefined ``__UpperCAmelCase``, so these tests cannot
    run as written; the first two methods also share one name.
    """
    # originally: zero capacity and trivial inputs yield value 0
    def __snake_case ( self : List[Any]):
        a : str = 0
        a : Optional[int] = [0]
        a : Union[str, Any] = [0]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
        a : List[str] = [60]
        a : str = [10]
        a : Optional[int] = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
    # originally: small easy case, expected optimum 5
    def __snake_case ( self : Optional[int]):
        a : Any = 3
        a : str = [1, 2, 3]
        a : Tuple = [3, 2, 1]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5)
    # originally: classic 50-capacity example, expected optimum 220
    def __snake_case ( self : Tuple):
        a : int = 50
        a : List[Any] = [60, 100, 120]
        a : Optional[int] = [10, 20, 30]
        a : str = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220)
if __name__ == "__main__":
    unittest.main()
| 40
| 1
|
"""simple docstring"""
from functools import lru_cache


@lru_cache
def lowercase ( A_ )-> int:
    """Return ``A_!`` (factorial), memoised across calls via ``lru_cache``.

    Fixes a NameError: the body tested/multiplied an undefined ``num`` and
    recursed into an undefined ``factorial`` although the parameter is
    named ``A_``.

    :param A_: non-negative integer.
    :raises ValueError: if ``A_`` is negative.
    """
    if A_ < 0:
        raise ValueError("Number should not be negative." )
    # base cases 0! == 1! == 1; otherwise recurse on the memoised function
    return 1 if A_ in (0, 1) else A_ * lowercase(A_ - 1 )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """Tokenizer test suite for LayoutLM (slow and fast tokenizers).

    NOTE(review): obfuscation damage — ``setUp`` binds the vocab list and
    file path to a local ``a``, yet the following lines read
    ``vocab_tokens`` and ``self.vocab_file`` (never set), so the suite
    fails before any assertion; several methods also share one name.
    """
    UpperCAmelCase : str = LayoutLMTokenizer
    UpperCAmelCase : int = LayoutLMTokenizerFast
    UpperCAmelCase : Union[str, Any] = True
    UpperCAmelCase : Optional[Any] = True
    def __snake_case ( self : Optional[int]):
        super().setUp()
        # minimal WordPiece vocabulary written to a temp file for the tests
        a : Tuple = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    # originally get_tokenizer(**kwargs)
    def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
    # originally get_input_output_texts(tokenizer)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
        a : Tuple = "UNwant\u00E9d,running"
        a : Dict = "unwanted, running"
        return input_text, output_text
    # originally the full-tokenizer round-trip test
    def __snake_case ( self : Any):
        a : List[Any] = self.tokenizer_class(self.vocab_file)
        a : str = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9])
    # intentionally skipped special-tokens test in the original
    def __snake_case ( self : Dict):
        pass
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-import structure: submodule name -> public names it provides.
# Fixes: the backend-specific lists were assigned to ``__lowercase``
# (overwriting the configuration dict) and the final _LazyModule call read an
# undefined ``_import_structure`` without installing the lazy module into
# ``sys.modules``.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
def lowercase ( A_ )-> str:
    """Convert an integer to a Python-style binary string ("0b..."/"-0b...").

    Fixes obfuscation damage: the guards read ``isinstance(A_, A_)`` (always
    True for any object vs. itself pattern — originally float/str checks),
    the loop read undefined ``num``/``negative``/``binary``, and the join
    stringified ``A_`` instead of each digit.

    :param A_: integer to convert (negative values allowed).
    :raises TypeError: if ``A_`` is a float or a str.
    """
    if isinstance(A_ , float):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(A_ , str):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if A_ == 0:
        return "0b0"
    negative = False
    num = A_
    if num < 0:
        negative = True
        num = -num
    # collect bits most-significant first
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary )
    return "0b" + "".join(str(e) for e in binary )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _A :
    """Circular convolution of two fixed signals via a rotation matrix.

    NOTE(review): obfuscation damage — ``__init__`` binds both signals to a
    throwaway local ``a`` instead of ``self.first_signal`` /
    ``self.second_signal``, and the compute method reads lost locals
    (``length_first_signal``, ``length_second_signal``, ``max_length``,
    ``rotated_signal``, ``matrix``, ``final_signal``) plus an undefined
    ``__UpperCAmelCase``. Restore the original bindings before use.
    """
    def __init__( self : Any):
        # presumably self.first_signal / self.second_signal — targets lost
        a : Dict = [2, 1, 2, -1]
        a : Dict = [1, 2, 3, 4]
    # originally circular_convolution(): returns the rounded product values
    def __snake_case ( self : Optional[int]):
        a : Dict = len(self.first_signal)
        a : int = len(self.second_signal)
        a : Tuple = max(__UpperCAmelCase , __UpperCAmelCase)
        # create a zero matrix of max_length x max_length
        a : Any = [[0] * max_length for i in range(__UpperCAmelCase)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # each row of the matrix is the second signal rotated by the row index
        for i in range(__UpperCAmelCase):
            a : str = deque(self.second_signal)
            rotated_signal.rotate(__UpperCAmelCase)
            for j, item in enumerate(__UpperCAmelCase):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        a : Any = np.matmul(np.transpose(__UpperCAmelCase) , np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(__UpperCAmelCase , 2) for i in final_signal]
if __name__ == "__main__":
    doctest.testmod()
| 40
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate


def lowercase ( vl , wt , w , n )-> float:
    """Greedy fractional-knapsack: best value packing fractions of items.

    Fixes a SyntaxError: all four parameters shared the name ``A_`` and the
    sort key read an undefined ``x``. Parameter names are restored from the
    body's usage (values, weights, capacity, item count).

    :param vl: item values.
    :param wt: item weights (parallel to ``vl``).
    :param w: knapsack capacity.
    :param n: number of items.
    :return: maximum achievable value, taking at most a fraction of one item.
    """
    # sort items by value density (value/weight), best first
    r = sorted(zip(vl , wt ) , key=lambda item : item[0] / item[1] , reverse=True )
    vl , wt = [i[0] for i in r], [i[1] for i in r]
    # prefix sums of weights; k = number of items that fit whole
    acc = list(accumulate(wt ) )
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
class _A :
    """Max-Fenwick tree (duplicate of the earlier copy in this file).

    NOTE(review): same obfuscation damage as the first copy — ``__init__``
    binds a local ``a`` instead of ``self.size``/``self.arr``/``self.tree``;
    every method is named ``__snake_case`` while bodies call
    ``self.get_prev``/``self.get_next``; and the two-parameter methods repeat
    the parameter name ``__UpperCAmelCase``, which is a SyntaxError.
    """
    def __init__( self : int , __UpperCAmelCase : int):
        # presumably self.size / self.arr / self.tree — targets lost
        a : Tuple = size
        a : Dict = [0] * size
        a : Optional[int] = [0] * size
    # originally get_next(index): next tree node covering a larger range
    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        return index | (index + 1)
    # originally get_prev(index): rightmost index below this node's range
    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        return (index & (index + 1)) - 1
    # originally update(index, value): set arr[index], refresh tree maxima
    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        a : Union[str, Any] = value
        while index < self.size:
            a : Dict = self.get_prev(__UpperCAmelCase) + 1
            if current_left_border == index:
                a : Optional[int] = value
            else:
                a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.get_next(__UpperCAmelCase)
    # originally query(left, right): max of arr[left:right] (right exclusive)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        right -= 1 # Because of right is exclusive
        a : List[str] = 0
        while left <= right:
            a : Dict = self.get_prev(__UpperCAmelCase)
            if left <= current_left:
                a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
                a : Optional[Any] = current_left
            else:
                a : List[str] = max(__UpperCAmelCase , self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
|
"""simple docstring"""
from __future__ import annotations

# NOTE(review): numpy exposes no name ``floataa`` — presumably ``float64``
# before obfuscation; as written this import raises ImportError at module
# load, and the ``NDArray[floataa]`` annotations below depend on it.
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force ( magnitude , angle , radian_mode = False )-> list[float]:
    """Resolve a force of *magnitude* at *angle* into ``[Fx, Fy]`` components.

    Fixes a SyntaxError: all three parameters were named ``A_`` (duplicate
    argument names). Names are restored from the body's use of
    cos/sin/radians, and the def is renamed to ``polar_force``, the name the
    module-level self-test below actually calls.

    :param magnitude: force magnitude.
    :param angle: direction of the force.
    :param radian_mode: if True, *angle* is in radians; otherwise degrees.
    :return: ``[Fx, Fy]`` Cartesian components.
    """
    if radian_mode:
        return [magnitude * cos(angle ), magnitude * sin(angle )]
    return [magnitude * cos(radians(angle ) ), magnitude * sin(radians(angle ) )]
def in_static_equilibrium ( forces , location , eps = 10**-1 )-> bool:
    """True when the net moment of *forces* applied at *location* is ~zero.

    Fixes a SyntaxError: all three parameters were named ``A_`` (duplicate
    argument names). Names are restored from the evident intent (the 2-D
    cross product of each application point with its force gives that
    force's moment about the origin), and the def is renamed to the name the
    module-level self-test below calls.

    :param forces: array of 2-D force vectors.
    :param location: array of 2-D application points (parallel to forces).
    :param eps: tolerance on the absolute net moment.
    """
    # per-row 2-D cross product -> scalar moment of each force about origin
    moments = cross(location , forces )
    sum_moments = sum(moments )
    return abs(sum_moments ) < eps
if __name__ == "__main__":
    # NOTE(review): this self-test calls ``polar_force`` /
    # ``in_static_equilibrium`` and reads ``forces`` / ``location``, but the
    # definitions above were obfuscated to ``lowercase`` / ``__lowercase`` —
    # restore those names for the assertions to run.
    # Test to check if it works
    __lowercase = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __lowercase = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ..utils import _LazyModule

# Lazy-import structure: submodule name -> public names it provides.
# Fixes: the dict was bound to ``__lowercase`` while the _LazyModule call
# read an undefined ``_import_structure``, and the lazy module was never
# installed into ``sys.modules``.
_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Install the lazy module so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
def lowercase ( mass , velocity )-> float:
    """Kinetic energy ``0.5 * m * |v|**2`` of a body.

    Fixes a SyntaxError: both parameters were named ``A_`` (duplicate
    argument names); names are restored from the body, which reads ``mass``
    and squares the speed.

    :param mass: mass of the body (must be non-negative).
    :param velocity: speed of the body (sign is irrelevant).
    :raises ValueError: if ``mass`` is negative.
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    return 0.5 * mass * abs(velocity) * abs(velocity)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections import deque
class _A :
    """Aho-Corasick automaton for multi-keyword substring search.

    NOTE(review): obfuscation damage — every method is named ``__snake_case``
    so ``self.add_keyword`` / ``self.set_fail_transitions`` /
    ``self.find_next_state`` called below are unbound; locals were renamed to
    ``a`` while later lines read the original names (``current_state``,
    ``next_state``, ``state``, ``q``, ``r``, ``child``, ``result``); and the
    two-parameter methods repeat the name ``__UpperCAmelCase``, which is a
    SyntaxError. Restore the original names before use.
    """
    def __init__( self : str , __UpperCAmelCase : list[str]):
        # adlist[i] is a trie node: {"value", "next_states", "fail_state", "output"}
        a : list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []})
        for keyword in keywords:
            self.add_keyword(__UpperCAmelCase)
        self.set_fail_transitions()
    # originally find_next_state(current_state, char): goto transition or None
    def __snake_case ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : str):
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None
    # originally add_keyword(keyword): extend the trie with one pattern
    def __snake_case ( self : str , __UpperCAmelCase : str):
        a : Dict = 0
        for character in keyword:
            a : Any = self.find_next_state(__UpperCAmelCase , __UpperCAmelCase)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    })
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                a : Tuple = len(self.adlist) - 1
            else:
                a : Tuple = next_state
        self.adlist[current_state]["output"].append(__UpperCAmelCase)
    # originally set_fail_transitions(): BFS to wire suffix (fail) links
    def __snake_case ( self : Optional[Any]):
        a : deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(__UpperCAmelCase)
        a : Optional[Any] = 0
        while q:
            a : Optional[int] = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(__UpperCAmelCase)
            a : List[str] = self.adlist[r]["fail_state"]
            while (
                self.find_next_state(__UpperCAmelCase , self.adlist[child]["value"]) is None
                and state != 0
            ):
                a : Optional[int] = self.adlist[state]["fail_state"]
            a : Dict = self.find_next_state(
                __UpperCAmelCase , self.adlist[child]["value"])
            if self.adlist[child]["fail_state"] is None:
                a : Optional[int] = 0
            a : Union[str, Any] = (
                self.adlist[child]["output"]
                + self.adlist[self.adlist[child]["fail_state"]]["output"]
            )
    # originally search_in(string): {keyword: [start indices]} of occurrences
    def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
        a : dict = {} # returns a dict with keywords and list of its occurrences
        a : Any = 0
        for i in range(len(__UpperCAmelCase)):
            while (
                self.find_next_state(__UpperCAmelCase , string[i]) is None
                and current_state != 0
            ):
                a : Any = self.adlist[current_state]["fail_state"]
            a : str = self.find_next_state(__UpperCAmelCase , string[i])
            if next_state is None:
                a : Dict = 0
            else:
                a : Optional[int] = next_state
            for key in self.adlist[current_state]["output"]:
                if key not in result:
                    a : List[str] = []
                result[key].append(i - len(__UpperCAmelCase) + 1)
        return result
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
|
"""simple docstring"""
import os
import sys
import unittest
# NOTE(review): originally ``git_repo_path`` — the following lines still read
# that name, but the assignment target was obfuscated to ``__lowercase``, so
# this header raises NameError as written.
__lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowercase = os.path.join(git_repo_path, """src""", """diffusers""")
class _A ( unittest.TestCase ):
    """Unit tests for diffusers' ``check_dummies`` utility script.

    NOTE(review): obfuscation damage — locals were renamed to ``a`` while
    the assertions read the original names (``objects``, ``dummy_files``,
    the ``__UpperCAmelCase`` placeholders), and all four test methods share
    one name, so only the last survives.
    """
    # originally test_find_backend
    def __snake_case ( self : Any):
        a : List[Any] = find_backend(" if not is_torch_available():")
        self.assertEqual(__UpperCAmelCase , "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(__UpperCAmelCase , "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        a : int = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx")
    # originally test_read_init
    def __snake_case ( self : Union[str, Any]):
        a : Dict = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , __UpperCAmelCase)
        self.assertIn("torch_and_transformers" , __UpperCAmelCase)
        self.assertIn("flax_and_transformers" , __UpperCAmelCase)
        self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" , objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel" , objects["flax"])
        self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"])
    # originally test_create_dummy_object
    def __snake_case ( self : Tuple):
        a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'")
        self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n")
        a : Dict = create_dummy_object("function" , "'torch'")
        self.assertEqual(
            __UpperCAmelCase , "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n")
        a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        a : int = create_dummy_object("FakeClass" , "'torch'")
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
    # originally test_create_dummy_files
    def __snake_case ( self : List[str]):
        a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
| 1
|
"""simple docstring"""
import string
import numpy
def lowercase(a: int, b: int) -> int:
    """Return the greatest common divisor of ``a`` and ``b`` via Euclid's algorithm.

    Fixes the original definition, which declared two parameters with the same
    name (a SyntaxError) and recursed through an undefined module name.
    """
    return b if a == 0 else lowercase(b % a, a)


# Name used by HillCipher.check_determinant elsewhere in this file; kept as a
# backward-compatible alias so that call resolves.
greatest_common_divisor = lowercase
class _A :
"""simple docstring"""
UpperCAmelCase : List[Any] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
UpperCAmelCase : Union[str, Any] = numpy.vectorize(lambda _a : x % 3_6 )
UpperCAmelCase : List[Any] = numpy.vectorize(_a )
def __init__( self : Tuple , __UpperCAmelCase : numpy.ndarray):
a : Any = self.modulus(__UpperCAmelCase) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
a : Union[str, Any] = encrypt_key.shape[0]
def __snake_case ( self : int , __UpperCAmelCase : str):
return self.key_string.index(__UpperCAmelCase)
def __snake_case ( self : Dict , __UpperCAmelCase : int):
return self.key_string[round(__UpperCAmelCase)]
def __snake_case ( self : int):
a : List[str] = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
a : Dict = det % len(self.key_string)
a : List[Any] = len(self.key_string)
if greatest_common_divisor(__UpperCAmelCase , len(self.key_string)) != 1:
a : Optional[int] = (
f'''determinant modular {req_l} of encryption key({det}) '''
f'''is not co prime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(__UpperCAmelCase)
def __snake_case ( self : Tuple , __UpperCAmelCase : str):
a : Optional[int] = [char for char in text.upper() if char in self.key_string]
a : Any = chars[-1]
while len(__UpperCAmelCase) % self.break_key != 0:
chars.append(__UpperCAmelCase)
return "".join(__UpperCAmelCase)
def __snake_case ( self : str , __UpperCAmelCase : str):
a : str = self.process_text(text.upper())
a : Union[str, Any] = ""
for i in range(0 , len(__UpperCAmelCase) - self.break_key + 1 , self.break_key):
a : List[str] = text[i : i + self.break_key]
a : Dict = [self.replace_letters(__UpperCAmelCase) for char in batch]
a : str = numpy.array([vec]).T
a : int = self.modulus(self.encrypt_key.dot(__UpperCAmelCase)).T.tolist()[
0
]
a : Optional[Any] = "".join(
self.replace_digits(__UpperCAmelCase) for num in batch_encrypted)
encrypted += encrypted_batch
return encrypted
def __snake_case ( self : Any):
a : Dict = round(numpy.linalg.det(self.encrypt_key))
if det < 0:
a : int = det % len(self.key_string)
a : Any = None
for i in range(len(self.key_string)):
if (det * i) % len(self.key_string) == 1:
a : int = i
break
a : Union[str, Any] = (
det_inv
* numpy.linalg.det(self.encrypt_key)
* numpy.linalg.inv(self.encrypt_key)
)
return self.to_int(self.modulus(__UpperCAmelCase))
def __snake_case ( self : Any , __UpperCAmelCase : str):
a : Dict = self.make_decrypt_key()
a : Optional[Any] = self.process_text(text.upper())
a : Optional[Any] = ""
for i in range(0 , len(__UpperCAmelCase) - self.break_key + 1 , self.break_key):
a : Optional[Any] = text[i : i + self.break_key]
a : List[Any] = [self.replace_letters(__UpperCAmelCase) for char in batch]
a : Dict = numpy.array([vec]).T
a : Union[str, Any] = self.modulus(decrypt_key.dot(__UpperCAmelCase)).T.tolist()[0]
a : str = "".join(
self.replace_digits(__UpperCAmelCase) for num in batch_decrypted)
decrypted += decrypted_batch
return decrypted
def lowercase() -> None:
    """Interactive driver: read a key matrix from stdin, then encrypt or decrypt text.

    Fixes the original, which took no parameters yet used ``A_``, appended to an
    undefined ``hill_matrix``, and instantiated an undefined ``HillCipher`` name
    (the cipher class in this file is ``_A``).
    """
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = _A(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The original called an undefined `main()`; the driver above is `lowercase`.
    lowercase()
| 40
|
"""simple docstring"""
# Canonical version pins for optional/runtime dependencies, keyed by
# distribution name. This table is data only; it is read by tooling elsewhere
# (not in this file) — presumably setup/dependency-version machinery; verify
# against the consumer before editing pins.
__lowercase = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
| 40
| 1
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline

# Directory of the fine-tuned (e.g. DreamBooth) Stable Diffusion checkpoint.
model_id = "path-to-your-trained-model"
# Fix: the original used the non-existent `torch.floataa` and then referenced
# undefined names `pipe`/`prompt` (everything had been bound to `__lowercase`).
# float16 halves memory; requires a CUDA device.
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map of submodule name -> list of public symbols. Consumed by _LazyModule so
# heavy optional backends (torch / tf / flax / tokenizers / tensorflow-text)
# are only imported when one of their symbols is first accessed.
# Fix: the original rebound a single `__lowercase` name for every backend list
# (discarding the previous ones) and then passed an undefined
# `_import_structure` to _LazyModule without installing it in sys.modules.
_import_structure = {
    "configuration_bert": ["BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BertConfig", "BertOnnxConfig"],
    "tokenization_bert": ["BasicTokenizer", "BertTokenizer", "WordpieceTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_fast"] = ["BertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bert"] = [
        "BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BertForMaskedLM",
        "BertForMultipleChoice",
        "BertForNextSentencePrediction",
        "BertForPreTraining",
        "BertForQuestionAnswering",
        "BertForSequenceClassification",
        "BertForTokenClassification",
        "BertLayer",
        "BertLMHeadModel",
        "BertModel",
        "BertPreTrainedModel",
        "load_tf_weights_in_bert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_bert"] = [
        "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBertEmbeddings",
        "TFBertForMaskedLM",
        "TFBertForMultipleChoice",
        "TFBertForNextSentencePrediction",
        "TFBertForPreTraining",
        "TFBertForQuestionAnswering",
        "TFBertForSequenceClassification",
        "TFBertForTokenClassification",
        "TFBertLMHeadModel",
        "TFBertMainLayer",
        "TFBertModel",
        "TFBertPreTrainedModel",
    ]

try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bert_tf"] = ["TFBertTokenizer"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_bert"] = [
        "FlaxBertForCausalLM",
        "FlaxBertForMaskedLM",
        "FlaxBertForMultipleChoice",
        "FlaxBertForNextSentencePrediction",
        "FlaxBertForPreTraining",
        "FlaxBertForQuestionAnswering",
        "FlaxBertForSequenceClassification",
        "FlaxBertForTokenClassification",
        "FlaxBertModel",
        "FlaxBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module below is used.
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )

    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Make torch kernels deterministic so the hard-coded expected slices in the
# tests below are reproducible across runs.
enable_full_determinism()
class _A(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """Unit tests for diffusers' VQModel.

    Fixes the obfuscated original: the base classes were the undefined
    placeholder ``_a`` (the mixins are imported at the top of this file), and
    method bodies referenced names (``loading_info``, ``output_slice``,
    ``__UpperCAmelCase``) that were never bound.
    """

    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """A random batched image input on the test device."""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Constructor kwargs + inputs used by the common model-tester mixin."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Intentionally disabled for this model (mixin default does not apply).
        pass

    def test_training(self):
        # Intentionally disabled for this model.
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 40
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A(PipelineTool):
    """Donut-based document visual question answering tool.

    Fixes the obfuscated original: base class was the undefined ``_a``
    (PipelineTool is imported above), all three worker methods shared the name
    ``__snake_case`` (so only the last survived), boolean kwargs had been
    replaced with the undefined ``__UpperCAmelCase``, and the processor call
    used the misspelled ``tokenajson``.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build the Donut task prompt and pixel inputs for one (document, question) pair."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy generation (num_beams=1) of the answer token sequence."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens, parse the Donut output to JSON and return the answer field."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 40
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class _A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str=7 , __UpperCAmelCase : str=3 , __UpperCAmelCase : Tuple=18 , __UpperCAmelCase : Dict=30 , __UpperCAmelCase : Any=400 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Tuple=True , __UpperCAmelCase : Tuple=[0.48_145_466, 0.4_578_275, 0.40_821_073] , __UpperCAmelCase : Optional[Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , __UpperCAmelCase : Union[str, Any]=True , ):
a : int = size if size is not None else {"height": 224, "width": 224}
a : List[str] = crop_size if crop_size is not None else {"height": 18, "width": 18}
a : List[Any] = parent
a : Any = batch_size
a : str = num_channels
a : Optional[int] = image_size
a : Tuple = min_resolution
a : str = max_resolution
a : Dict = do_resize
a : Any = size
a : Dict = do_center_crop
a : List[str] = crop_size
a : str = do_normalize
a : Optional[int] = image_mean
a : Tuple = image_std
a : Any = do_convert_rgb
def __snake_case ( self : Union[str, Any]):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __snake_case ( self : Dict , __UpperCAmelCase : Optional[Any]=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Any=False):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
a : str = []
for i in range(self.batch_size):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta))
else:
a : Tuple = []
for i in range(self.batch_size):
a , a : List[str] = np.random.choice(np.arange(self.min_resolution , self.max_resolution) , 2)
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
a : List[Any] = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1)) for x in image_inputs]
if torchify:
a : Optional[Any] = [torch.from_numpy(__UpperCAmelCase) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class _A(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ChineseCLIPImageProcessor on 3-channel inputs.

    Fixes the obfuscated original: base class was the undefined ``_a``
    (ImageProcessingSavingTestMixin is imported above), keyword values had
    been replaced by the undefined ``__UpperCAmelCase``, and locals were bound
    to throwaway names while later statements used the real ones.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def _check_shapes(self, image_processing, image_inputs):
        """Shared shape assertions for single-image and batched encoding."""
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._check_shapes(image_processing, image_inputs)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._check_shapes(image_processing, image_inputs)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._check_shapes(image_processing, image_inputs)
@require_torch
@require_vision
class _A(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for ChineseCLIPImageProcessor on 4-channel inputs (RGB conversion drops to 3).

    Same restoration as the 3-channel test class above: undefined base ``_a``,
    undefined ``__UpperCAmelCase`` keyword values and unbound locals fixed.
    """

    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        # RGBA inputs are converted to RGB by the processor.
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 40
|
"""simple docstring"""
from __future__ import annotations
class _A :
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : int = 0):
a : Tuple = key
def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__UpperCAmelCase) ^ key) for ch in content]
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__UpperCAmelCase) ^ key) for ch in content]
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
a : Any = ""
for ch in content:
ans += chr(ord(__UpperCAmelCase) ^ key)
return ans
def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
a : str = ""
for ch in content:
ans += chr(ord(__UpperCAmelCase) ^ key)
return ans
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
try:
with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase))
except OSError:
return False
return True
def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
try:
with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 1
|
"""simple docstring"""
def lowercase(a: int, b: int) -> int:
    """Multiply two non-negative integers by shift-and-add (Russian peasant method).

    Fixes the original definition, which declared two parameters with the same
    name (a SyntaxError) and then used ``a``/``b``/``res`` unbound.
    """
    res = 0
    while b > 0:
        if b & 1:  # current low bit of b contributes a doubled copy of a
            res += a
        a += a
        b >>= 1
    return res
def lowercase(a: int, b: int, c: int) -> int:
    """Return (a * b) % c without forming the full product, via shift-and-add kept mod c.

    Fixes the original definition, which declared three parameters with the
    same name (a SyntaxError) and used unbound locals.
    """
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 40
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase(monkeypatch):
    """Reset the set of already-emitted deprecation warnings between tests.

    Fix: the parameter was renamed ``A_`` while the body used ``monkeypatch``;
    pytest also injects fixtures by parameter NAME, so the rename broke
    fixture resolution.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowercase(monkeypatch):
    """Replace huggingface_hub inside datasets.inspect with a stub listing four metrics.

    Fix: the nested mock classes were both renamed ``_A`` while the body
    referenced ``MetricMock``/``HfhMock``, and the fixture parameter was
    renamed away from ``monkeypatch``.
    """

    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Any:
    """Assert each metrics entry point emits the deprecation warning that
    points users at the ``evaluate`` library."""
    # NOTE(review): all five parameters share the placeholder name ``A_`` —
    # duplicate argument names are a SyntaxError — and the body references
    # ``args`` / ``tmp_path`` / ``func`` that the signature no longer binds.
    # The original signature was presumably
    # (func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path);
    # the warning category passed to pytest.warns was presumably
    # FutureWarning.  Confirm before running.
    if "tmp_path" in args:
        # substitute the literal "tmp_path" placeholder with the real fixture path
        a : Union[str, Any] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(A_ , match="https://huggingface.co/docs/evaluate" ):
        func(*A_ )
| 40
| 1
|
"""simple docstring"""
def lowercase(num: int) -> int:
    """Return ``num!`` (the factorial of a non-negative integer).

    Fixes the garbled original, whose parameter was a placeholder while the
    body referenced undefined names ``num`` and ``fact``.
    """
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def lowercase(number: int) -> int:
    """Return the sum of the decimal digits of a non-negative ``number``.

    Fixes the garbled original, whose accumulator and the ``number``
    parameter itself were renamed away (NameError as written).
    """
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number //= 10  # removing the last digit from the given number
    return sum_of_digits
def lowercase(A_: int = 100) -> int:
    """Project Euler 20: return the sum of the digits of ``A_!``.

    The original delegated to ``factorial`` and ``split_and_add``, names
    that do not exist in this module (both siblings were renamed to
    ``lowercase``); the computation is inlined so the function is
    self-contained.
    """
    fact = 1
    for i in range(2, A_ + 1):
        fact *= i
    # Python ints are arbitrary precision, so digit-summing via str is exact.
    return sum(int(digit) for digit in str(fact))
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module (the function
    # above is named ``lowercase``), so this entry point raises NameError
    # as written.
    print(solution(int(input("""Enter the Number: """).strip())))
| 40
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
# NOTE(review): the renaming pass bound both example grids to the same name
# ``__lowercase``, so this glider grid is immediately shadowed by the blinker
# below; the originals were presumably ``GLIDER`` and ``BLINKER``.
__lowercase = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example (vertical bar that oscillates to horizontal)
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase(cells: list[list[int]]) -> list[list[int]]:
    """Return the next Conway's Game of Life generation for a 0/1 grid.

    Rules (excerpt from Wikipedia):
      1. Any live cell with two or three live neighbours survives.
      2. Any dead cell with exactly three live neighbours becomes live.
      3. All other cells are dead in the next generation.

    Fixes the garbled original, whose accumulators and the ``cells`` name
    itself were renamed away (NameError as written).  Assumes a rectangular
    grid.
    """
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Count live neighbours among the up-to-8 adjacent cells,
            # clamping at the grid borders.
            neighbour_count = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue  # the cell itself is not a neighbour
                    ni, nj = i + di, j + dj
                    if 0 <= ni < len(cells) and 0 <= nj < len(cells[i]):
                        neighbour_count += cells[ni][nj]
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)
        next_generation.append(next_generation_row)
    return next_generation
def lowercase ( A_ , A_ )-> list[Image.Image]:
    """Render successive Game of Life generations as greyscale PIL images
    (live cell = black, dead = white) and return the frame list."""
    # NOTE(review): the two parameters share the placeholder name ``A_``
    # (duplicate argument names are a SyntaxError) and the body references
    # ``cells`` / ``images`` / ``img`` / ``pixels`` / ``colour`` /
    # ``new_generation``, none of which survive the renaming pass.  The
    # original signature was presumably ``(cells, frames)``.
    '''simple docstring'''
    a : List[str] = []
    for _ in range(A_ ):
        # Create output image
        a : str = Image.new("RGB" , (len(cells[0] ), len(A_ )) )
        a : Union[str, Any] = img.load()
        # Save cells to image
        for x in range(len(A_ ) ):
            for y in range(len(cells[0] ) ):
                # NOTE(review): indexes ``cells[y][x]`` with x over rows and
                # y over columns — only safe for square grids; confirm intent.
                a : Optional[Any] = 255 - cells[y][x] * 255
                a : str = (colour, colour, colour)
        # Save image
        images.append(A_ )
        a : Tuple = new_generation(A_ )
    return images
if __name__ == "__main__":
    # NOTE(review): ``generate_images`` and ``GLIDER`` do not exist under
    # these names in this module (they were renamed to ``lowercase`` /
    # ``__lowercase``), and ``images`` is never bound — this demo raises
    # NameError as written.
    __lowercase = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 40
| 1
|
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__lowercase = logging.get_logger(__name__)
def lowercase(box, width, height) -> list:
    """Scale an absolute ``(left, top, right, bottom)`` pixel box to the
    0-1000 coordinate system that LayoutLM-style models expect.

    Args:
        box: four absolute pixel coordinates (left, top, right, bottom).
        width: image width in pixels.
        height: image height in pixels.

    Restores the three parameter names the machine-renaming collapsed into
    a single duplicated placeholder (a SyntaxError as written).
    """
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
def lowercase ( A_ , A_ , A_ = None )-> Union[str, Any]:
    """Run Tesseract OCR on one image and return the recognised words plus
    their bounding boxes normalised to the 0-1000 LayoutLM grid."""
    # NOTE(review): the three parameters were collapsed into one placeholder
    # name (duplicate arguments are a SyntaxError) and most intermediate
    # results are assigned to a throwaway ``a`` while later lines reference
    # the original names (``pil_image``, ``data``, ``irrelevant_indices``,
    # ``actual_boxes``, ``normalized_boxes``, ``words``).  The original
    # signature was presumably ``(image, lang, tesseract_config)``.
    '''simple docstring'''
    a : List[str] = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    a : Optional[Any] = to_pil_image(A_ )
    a , a : List[str] = pil_image.size
    a : List[str] = pytesseract.image_to_data(A_ , lang=A_ , output_type="dict" , config=A_ )
    a , a , a , a , a : List[str] = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    a : Optional[Any] = [idx for idx, word in enumerate(A_ ) if not word.strip()]
    a : Any = [word for idx, word in enumerate(A_ ) if idx not in irrelevant_indices]
    a : str = [coord for idx, coord in enumerate(A_ ) if idx not in irrelevant_indices]
    a : Any = [coord for idx, coord in enumerate(A_ ) if idx not in irrelevant_indices]
    a : int = [coord for idx, coord in enumerate(A_ ) if idx not in irrelevant_indices]
    a : int = [coord for idx, coord in enumerate(A_ ) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    a : Tuple = []
    for x, y, w, h in zip(A_ , A_ , A_ , A_ ):
        a : int = [x, y, x + w, y + h]
        actual_boxes.append(A_ )
    # finally, normalize the bounding boxes
    a : Dict = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(A_ , A_ , A_ ) )
    assert len(A_ ) == len(A_ ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class _A ( _a ):
    """Image processor for LayoutLM-style document models: optionally resizes
    images, optionally runs Tesseract OCR to extract words and 0-1000
    normalised boxes, flips channels RGB->BGR, and packs a BatchFeature.

    NOTE(review): the method signatures below declare several parameters
    under the single mangled name ``__UpperCAmelCase`` (duplicate argument
    names are a SyntaxError) and assign results to a throwaway ``a`` while
    later lines use the original names — machine-renamed; restore distinct
    parameter names before running.
    """
    # model_input_names equivalent: only pixel values are produced
    UpperCAmelCase : List[str] = ["""pixel_values"""]
    def __init__( self : Union[str, Any] , __UpperCAmelCase : bool = True , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Optional[str] = "" , **__UpperCAmelCase : int , ):
        super().__init__(**__UpperCAmelCase)
        # default to a 224x224 target size when none is supplied
        a : List[Any] = size if size is not None else {"height": 224, "width": 224}
        a : Optional[int] = get_size_dict(__UpperCAmelCase)
        a : List[Any] = do_resize
        a : List[str] = size
        a : List[str] = resample
        a : str = apply_ocr
        a : Tuple = ocr_lang
        a : str = tesseract_config
    def __snake_case ( self : str , __UpperCAmelCase : np.ndarray , __UpperCAmelCase : Dict[str, int] , __UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCAmelCase : Dict , ):
        # resize one image to the (height, width) requested in ``size``
        a : Union[str, Any] = get_size_dict(__UpperCAmelCase)
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''')
        a : Tuple = (size["height"], size["width"])
        return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase)
    def __snake_case ( self : Dict , __UpperCAmelCase : ImageInput , __UpperCAmelCase : bool = None , __UpperCAmelCase : Dict[str, int] = None , __UpperCAmelCase : PILImageResampling = None , __UpperCAmelCase : bool = None , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCAmelCase : List[Any] , ):
        # main preprocessing entry point: resolve per-call overrides against
        # the instance defaults, validate, OCR, resize, reorder channels
        a : str = do_resize if do_resize is not None else self.do_resize
        a : int = size if size is not None else self.size
        a : Tuple = get_size_dict(__UpperCAmelCase)
        a : Tuple = resample if resample is not None else self.resample
        a : Dict = apply_ocr if apply_ocr is not None else self.apply_ocr
        a : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
        a : str = tesseract_config if tesseract_config is not None else self.tesseract_config
        a : List[Any] = make_list_of_images(__UpperCAmelCase)
        if not valid_images(__UpperCAmelCase):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        a : Optional[int] = [to_numpy_array(__UpperCAmelCase) for image in images]
        if apply_ocr:
            requires_backends(self , "pytesseract")
            a : Tuple = []
            a : Union[str, Any] = []
            for image in images:
                a , a : List[str] = apply_tesseract(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
                words_batch.append(__UpperCAmelCase)
                boxes_batch.append(__UpperCAmelCase)
        if do_resize:
            a : Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        a : int = [flip_channel_order(__UpperCAmelCase) for image in images]
        a : str = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase) for image in images]
        a : Tuple = BatchFeature(data={"pixel_values": images} , tensor_type=__UpperCAmelCase)
        if apply_ocr:
            # attach the OCR results alongside the pixel values
            a : Any = words_batch
            a : Dict = boxes_batch
        return data
| 40
|
"""simple docstring"""
from itertools import permutations
def lowercase(num) -> bool:
    """Project Euler 43 predicate: given a tuple of the ten digits of a
    0-9 pandigital number, check that every 3-digit substring
    d2d3d4 .. d8d9d10 is divisible by 2, 3, 5, 7, 11, 13, 17 respectively.

    Restores the parameter name — the garbled original declared a
    placeholder while the body referenced ``num`` (NameError as written).
    """
    if num[3] % 2 != 0:  # d2d3d4 divisible by 2 <=> its last digit is even
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:  # digit-sum test for 3 | d3d4d5
        return False
    if num[5] % 5 != 0:  # d4d5d6 divisible by 5 <=> its last digit is 0 or 5
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        # check d(i+5)d(i+6)d(i+7) against the remaining primes
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def lowercase(A_: int = 10) -> int:
    """Project Euler 43: sum every permutation of the digits ``0..A_-1``
    whose consecutive 3-digit substrings (starting at the second digit)
    are divisible by the successive primes 2, 3, 5, 7, 11, 13, 17, ...

    The original delegated to ``is_substring_divisible`` and mapped an
    undefined name — neither exists in this module after renaming — so the
    check is inlined here.  For the default ``A_ = 10`` this reproduces the
    original behaviour; smaller ``A_`` (which crashed before) now work too.
    """
    primes = (2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37)
    total = 0
    for num in permutations(range(A_)):
        if all(
            (num[i + 1] * 100 + num[i + 2] * 10 + num[i + 3]) % primes[i] == 0
            for i in range(len(num) - 3)
        ):
            total += int("".join(map(str, num)))
    return total
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined in this module (the function
    # above is named ``lowercase``); restore the name before running.
    print(f'''{solution() = }''')
| 40
| 1
|
"""simple docstring"""
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def lowercase ( A_ , A_ , A_ , A_ , A_ )-> Optional[int]:
    """Merge LoRA weights from a .safetensors checkpoint into a Stable
    Diffusion pipeline: W = W0 + alpha * (up @ down) per targeted layer."""
    # NOTE(review): the five parameters share one placeholder name (duplicate
    # arguments are a SyntaxError); the original signature was presumably
    # (base_model_path, checkpoint_path, lora_prefix_unet,
    #  lora_prefix_text_encoder, alpha).  The body also references
    # ``state_dict`` / ``visited`` / ``pipeline`` / ``layer_infos`` /
    # ``curr_layer`` / ``temp_name`` / ``pair_keys`` whose bindings were
    # renamed to ``a``, and module-level LORA_PREFIX_* constants that were
    # renamed to ``__lowercase``.  Restore names before use.
    '''simple docstring'''
    a : Any = StableDiffusionPipeline.from_pretrained(A_ , torch_dtype=torch.floataa )
    # load LoRA weight from .safetensors
    a : str = load_file(A_ )
    a : Dict = []
    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"
        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue
        if "text" in key:
            a : Dict = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" )
            a : Dict = pipeline.text_encoder
        else:
            a : List[str] = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" )
            a : List[Any] = pipeline.unet
        # find the target layer by walking attribute segments, re-joining
        # segments with "_" when a lookup fails
        a : Dict = layer_infos.pop(0 )
        while len(A_ ) > -1:
            try:
                a : Dict = curr_layer.__getattr__(A_ )
                if len(A_ ) > 0:
                    a : int = layer_infos.pop(0 )
                elif len(A_ ) == 0:
                    break
            except Exception:
                if len(A_ ) > 0:
                    temp_name += "_" + layer_infos.pop(0 )
                else:
                    a : List[str] = layer_infos.pop(0 )
        a : List[str] = []
        # pair_keys is ordered (lora_up, lora_down)
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down" , "lora_up" ) )
            pair_keys.append(A_ )
        else:
            pair_keys.append(A_ )
            pair_keys.append(key.replace("lora_up" , "lora_down" ) )
        # update weight (4-D conv weights are squeezed to 2-D for the matmul)
        if len(state_dict[pair_keys[0]].shape ) == 4:
            a : Optional[int] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            a : Union[str, Any] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(A_ , A_ ).unsqueeze(2 ).unsqueeze(3 )
        else:
            a : int = state_dict[pair_keys[0]].to(torch.floataa )
            a : Optional[Any] = state_dict[pair_keys[1]].to(torch.floataa )
            curr_layer.weight.data += alpha * torch.mm(A_ , A_ )
        # update visited list so the partner key of each pair is skipped
        for item in pair_keys:
            visited.append(A_ )
    return pipeline
if __name__ == "__main__":
    # CLI driver: parse paths/prefixes, run the LoRA merge, save the pipeline.
    __lowercase = argparse.ArgumentParser()
    parser.add_argument(
        """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
    )
    parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument(
        """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
    )
    parser.add_argument(
        """--lora_prefix_text_encoder""",
        default="""lora_te""",
        type=str,
        help="""The prefix of text encoder weight in safetensors""",
    )
    parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
    parser.add_argument(
        """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
    )
    parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    # NOTE(review): every assignment below targets the same renamed name
    # ``__lowercase``, so each binding (parser, args, paths, prefixes, alpha,
    # pipe) immediately shadows the previous one, and later references such as
    # ``parser`` / ``args`` / ``convert`` / ``pipe`` are unbound as written.
    __lowercase = parser.parse_args()
    __lowercase = args.base_model_path
    __lowercase = args.checkpoint_path
    __lowercase = args.dump_path
    __lowercase = args.lora_prefix_unet
    __lowercase = args.lora_prefix_text_encoder
    __lowercase = args.alpha
    __lowercase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    __lowercase = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( _a ,unittest.TestCase ):
    """Fast CPU-sized tests for the Kandinsky 2.2 ControlNet decoder
    pipeline: builds tiny UNet/VQ models and checks a 2-step 64x64
    generation against a hard-coded expected slice.

    NOTE(review): boolean scheduler flags below (``clip_sample``,
    ``set_alpha_to_one``, ``thresholding``) and some call kwargs were
    replaced by the mangled name ``__UpperCAmelCase`` by the renaming
    pass — restore the original literals before running.
    """
    UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline
    UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Dict = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    UpperCAmelCase : Optional[int] = False
    @property
    def __snake_case ( self : Optional[Any]):
        # text embedder hidden size (tiny for test speed)
        return 32
    @property
    def __snake_case ( self : Dict):
        # time input dim
        return 32
    @property
    def __snake_case ( self : Dict):
        return self.time_input_dim
    @property
    def __snake_case ( self : Any):
        return self.time_input_dim * 4
    @property
    def __snake_case ( self : str):
        # cross-attention dim
        return 100
    @property
    def __snake_case ( self : str):
        # tiny dummy UNet with the image_hint conditioning used by ControlNet
        torch.manual_seed(0)
        a : str = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        a : Dict = UNetaDConditionModel(**__UpperCAmelCase)
        return model
    @property
    def __snake_case ( self : str):
        # constructor kwargs for the tiny VQ decoder
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def __snake_case ( self : Union[str, Any]):
        torch.manual_seed(0)
        a : Dict = VQModel(**self.dummy_movq_kwargs)
        return model
    def __snake_case ( self : Optional[Any]):
        # assemble the pipeline components (unet, scheduler, movq)
        a : Optional[Any] = self.dummy_unet
        a : int = self.dummy_movq
        a : str = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , )
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0):
        # deterministic dummy inputs seeded per-test
        a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            __UpperCAmelCase)
        # create hint
        a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        if str(__UpperCAmelCase).startswith("mps"):
            # mps does not support device-bound generators
            a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase)
        else:
            a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : str = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def __snake_case ( self : Dict):
        # end-to-end smoke test: images and tuple outputs must match the
        # reference slice within 1e-2
        a : str = "cpu"
        a : Tuple = self.get_dummy_components()
        a : Dict = self.pipeline_class(**__UpperCAmelCase)
        a : Optional[int] = pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase))
        a : Any = output.images
        a : Any = pipe(
            **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0]
        a : Union[str, Any] = image[0, -3:, -3:, -1]
        a : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : Tuple = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """GPU integration test: full Kandinsky 2.2 prior + ControlNet-depth
    pipeline run compared against a reference numpy image."""
    def __snake_case ( self : Optional[int]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : List[str]):
        # reference output and depth-hint input hosted on the hub
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        a : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # normalise the hint to [0, 1] and reshape to NCHW
        a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0
        a : str = hint.permute(2 , 0 , 1).unsqueeze(0)
        a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(__UpperCAmelCase)
        a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        a : int = pipeline.to(__UpperCAmelCase)
        pipeline.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Tuple = "A robot, 4k photo"
        a : Any = torch.Generator(device="cuda").manual_seed(0)
        a , a : int = pipe_prior(
            __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        a : str = torch.Generator(device="cuda").manual_seed(0)
        a : Union[str, Any] = pipeline(
            image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , )
        a : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
| 40
| 1
|
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
# Stable Diffusion inference on CPU with Intel Extension for PyTorch (ipex):
# moves all submodules to channels-last, optimizes them for bfloat16, then
# generates one image under CPU autocast.
# NOTE(review): every binding below was renamed to ``__lowercase``, shadowing
# the previous one, while later lines reference the original names (parser,
# args, pipe, device, generator, prompt, seed, generate_kwargs, image) — this
# script raises NameError as written.  ``torch.bfloataa`` / ``floataa``
# appear to be mangled ``bfloat16`` / ``float16``; confirm upstream.
__lowercase = argparse.ArgumentParser("""Stable Diffusion script with intel optimization""", add_help=False)
parser.add_argument("""--dpm""", action="""store_true""", help="""Enable DPMSolver or not""")
parser.add_argument("""--steps""", default=None, type=int, help="""Num inference steps""")
__lowercase = parser.parse_args()
__lowercase = """cpu"""
__lowercase = """a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"""
__lowercase = """path-to-your-trained-model"""
__lowercase = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    __lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowercase = pipe.to(device)
# to channels last
__lowercase = pipe.unet.to(memory_format=torch.channels_last)
__lowercase = pipe.vae.to(memory_format=torch.channels_last)
__lowercase = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    __lowercase = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex: trace the UNet with a representative sample input
__lowercase = torch.randn(2, 4, 64, 64)
__lowercase = torch.rand(1) * 999
__lowercase = torch.randn(2, 77, 768)
__lowercase = (sample, timestep, encoder_hidden_status)
try:
    __lowercase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
    # fall back to optimizing without a traced sample input
    __lowercase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowercase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowercase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
    __lowercase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute with a fixed seed for reproducibility
__lowercase = 666
__lowercase = torch.Generator(device).manual_seed(seed)
__lowercase = {"""generator""": generator}
if args.steps is not None:
    __lowercase = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
    __lowercase = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("""generated.png""")
| 40
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowercase ( A_ )-> Dict:
    """Build an ASTConfig for the named Audio Spectrogram Transformer
    checkpoint: patch-stride variants tweak dimensions, and the label
    mapping is fetched from the hub."""
    # NOTE(review): the configuration writes below target a throwaway local
    # ``a`` instead of config attributes (presumably num_mel_bins for
    # speech-commands and the fstride/tstride pairs for the 12-12/14-14/16-16
    # variants, plus num_labels/id2label/label2id), and ``idalabel`` is
    # referenced before being bound — machine-garbled; restore the attribute
    # assignments before using the returned config.
    '''simple docstring'''
    a : str = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        a : Union[str, Any] = 128
    elif "12-12" in model_name:
        a : List[Any] = 12
        a : str = 12
    elif "14-14" in model_name:
        a : List[Any] = 14
        a : Optional[int] = 14
    elif "16-16" in model_name:
        a : Any = 16
        a : List[Any] = 16
    else:
        raise ValueError("Model not supported" )
    a : Optional[int] = "huggingface/label-files"
    if "speech-commands" in model_name:
        a : Optional[int] = 35
        a : List[str] = "speech-commands-v2-id2label.json"
    else:
        a : Optional[Any] = 527
        a : Tuple = "audioset-id2label.json"
    a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
    a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()}
    a : Any = idalabel
    a : str = {v: k for k, v in idalabel.items()}
    return config
def lowercase(name: str) -> str:
    """Map one original AST (Audio Spectrogram Transformer) checkpoint key
    to its Hugging Face Transformers equivalent.

    The replacements must be applied cumulatively to the same string; the
    garbled original assigned each replacement to a throwaway variable and
    referenced an undefined ``name``, so the chain is restored here.
    """
    if "module.v" in name:
        name = name.replace("module.v" , "audio_spectrogram_transformer" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "dist_token" in name:
        name = name.replace("dist_token" , "embeddings.distillation_token" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0" , "classifier.layernorm" )
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1" , "classifier.dense" )
    return name
def lowercase ( A_ , A_ )-> Any:
    """Rewrite an original AST checkpoint dict into Hugging Face naming,
    splitting each fused qkv matrix into separate query/key/value tensors."""
    # NOTE(review): the two parameters share one placeholder name (duplicate
    # arguments are a SyntaxError; presumably (orig_state_dict, config)), and
    # every write below targets a throwaway local ``a`` instead of
    # ``orig_state_dict["...query/key/value..."] = ...`` — the dict-key
    # assignments (including the rename_key fall-through) were lost in the
    # renaming pass; restore them before use.
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        a : str = orig_state_dict.pop(A_ )
        if "qkv" in key:
            a : int = key.split("." )
            a : Optional[int] = int(key_split[3] )
            a : int = config.hidden_size
            if "weight" in key:
                # fused qkv weight rows: [query | key | value], each of size dim
                a : List[str] = val[:dim, :]
                a : Any = val[dim : dim * 2, :]
                a : int = val[-dim:, :]
            else:
                # fused qkv bias: same three-way split
                a : Optional[Any] = val[:dim]
                a : Union[str, Any] = val[dim : dim * 2]
                a : str = val[-dim:]
        else:
            a : str = val
    return orig_state_dict
def lowercase(state_dict) -> None:
    """Drop the original classification/distillation head weights from the
    checkpoint ``state_dict`` in place; missing keys are ignored.

    Fixes the garbled original, which referenced undefined names
    (``state_dict``/``ignore_keys``) and popped a placeholder key.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        # default None makes the pop a no-op when the key is absent
        state_dict.pop(k, None)
@torch.no_grad()
def lowercase ( A_ , A_ , A_=False )-> Optional[int]:
    """Download an original AST checkpoint, convert it to the Hugging Face
    format, verify logits on a sample input, and optionally save/push."""
    # NOTE(review): the three parameters share one placeholder name
    # (duplicate arguments are a SyntaxError); the original signature was
    # presumably (model_name, pytorch_dump_folder_path, push_to_hub).  Most
    # locals below are likewise assigned to ``a`` while later lines use the
    # original names (checkpoint_url, state_dict, new_state_dict, model,
    # mean/std/max_length, feature_extractor, waveform, inputs, outputs,
    # logits, expected_slice).  The numeric literals with doubled digits
    # (e.g. -4.2_6_7_7_3_9_3) are underscore-mangled floats.
    '''simple docstring'''
    a : Optional[int] = get_audio_spectrogram_transformer_config(A_ )
    a : Dict = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    a : Any = model_name_to_url[model_name]
    a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )
    # remove some keys
    remove_keys(A_ )
    # rename some keys
    a : Union[str, Any] = convert_state_dict(A_ , A_ )
    # load 🤗 model
    a : List[str] = ASTForAudioClassification(A_ )
    model.eval()
    model.load_state_dict(A_ )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
    a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
    a : str = 1_024 if "speech-commands" not in model_name else 128
    a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ )
    if "speech-commands" in model_name:
        a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
        a : int = dataset[0]["audio"]["array"]
    else:
        a : Tuple = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        a , a : Tuple = torchaudio.load(A_ )
        a : Optional[Any] = waveform.squeeze().numpy()
    a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" )
    # forward pass
    a : Optional[Any] = model(**A_ )
    a : List[str] = outputs.logits
    # per-checkpoint reference logits (first three entries)
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
    else:
        raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ):
        raise ValueError("Logits don't match" )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(A_ ).mkdir(exist_ok=A_ )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(A_ )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(A_ )
    if push_to_hub:
        print("Pushing model and feature extractor to the hub..." )
        model.push_to_hub(F'''MIT/{model_name}''' )
        feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
    # CLI driver for the AST checkpoint conversion.
    # NOTE(review): the parser is bound to ``__lowercase`` while the
    # add_argument / parse_args calls reference ``parser`` — unbound as
    # written; same for ``args`` below.
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""ast-finetuned-audioset-10-10-0.4593""",
        type=str,
        help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    __lowercase = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
__lowercase = logging.get_logger(__name__)
class _A ( _a ):
    """Deprecated alias for the BEiT image processor.

    Kept only for backward compatibility: construction emits a deprecation
    warning and forwards all arguments to the parent class.  The garbled
    original declared ``*args`` and ``**kwargs`` under a single duplicated
    placeholder name (a SyntaxError) and passed that placeholder as the
    warning category; both are restored here (``FutureWarning`` matches the
    deprecation intent — confirm against upstream).
    """
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowercase = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = ["""CLIPFeatureExtractor"""]
__lowercase = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
__lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( _a ,_a ,_a ,unittest.TestCase ):
    """Fast, CPU-only tests for ``StableDiffusionInpaintPipeline`` built from
    tiny randomly-initialized components.

    NOTE(review): the three ``_a`` bases are mangled identifiers — presumably
    the PipelineLatentTesterMixin / PipelineKarrasSchedulerTesterMixin /
    PipelineTesterMixin imported above; confirm against the original file.
    NOTE(review): every class attribute below was renamed to the same name
    ``UpperCAmelCase``, so later assignments overwrite earlier ones; method
    names were all renamed to ``__snake_case`` (same problem), and several
    bodies reference ``__UpperCAmelCase`` / names like ``unet`` that are never
    bound in scope — this class cannot run as written.
    """

    UpperCAmelCase : str = StableDiffusionInpaintPipeline
    UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCAmelCase : Union[str, Any] = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCAmelCase : int = frozenset([] )

    def __snake_case ( self : Dict):
        # Build tiny UNet / scheduler / VAE / text-encoder components for fast tests.
        # NOTE(review): all results are bound to the throwaway local ``a``, and
        # ``__UpperCAmelCase`` is not a parameter of this method — the original
        # bound named locals (unet, scheduler, vae, ...) consumed below.
        torch.manual_seed(0)
        a : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
        a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase)
        torch.manual_seed(0)
        a : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        a : Any = CLIPTextModel(__UpperCAmelCase)
        a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0):
        # Build deterministic dummy inputs (image, mask, generator, prompt).
        # NOTE(review): the duplicated parameter name ``__UpperCAmelCase`` is a
        # SyntaxError in Python — originally (device, seed).
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0]
        a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64))
        a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
        if str(__UpperCAmelCase).startswith("mps"):
            # mps does not support device-bound generators
            a : Tuple = torch.manual_seed(__UpperCAmelCase)
        else:
            a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def __snake_case ( self : List[str]):
        # Smoke test: run the pipeline end-to-end and compare an image slice
        # against a fixed expected slice.
        a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
        a : Tuple = self.get_dummy_components()
        a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase)
        a : int = sd_pipe.to(__UpperCAmelCase)
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Any = self.get_dummy_inputs(__UpperCAmelCase)
        a : Optional[int] = sd_pipe(**__UpperCAmelCase).images
        a : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def __snake_case ( self : str):
        # Delegates to the shared mixin batch-consistency check with a looser tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow, GPU-only integration tests for the real
    ``stabilityai/stable-diffusion-2-inpainting`` checkpoint.

    NOTE(review): method names were all mangled to ``__snake_case`` (later
    defs overwrite earlier ones) and most results are bound to the throwaway
    local ``a`` while the code below reads the original names (pipe, image,
    expected_image, ...); ``__UpperCAmelCase`` is never bound. This class
    cannot run as written — reconstruct names from the original file.
    """

    def __snake_case ( self : Union[str, Any]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Dict):
        # fp32 end-to-end inpainting against a reference output image.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        a : Tuple = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase)
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Any = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : str = torch.manual_seed(0)
        a : Union[str, Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def __snake_case ( self : Any):
        # fp16 end-to-end inpainting; larger tolerance for half precision.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Any = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Dict = torch.manual_seed(0)
        a : List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : Optional[Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def __snake_case ( self : int):
        # Memory test: with attention slicing + sequential CPU offload the
        # fp16 pipeline must stay under ~2.65 GB of peak GPU memory.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler")
        a : int = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Optional[int] = torch.manual_seed(0)
        a : str = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
        a : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 40
| 1
|
"""simple docstring"""
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num`` (num >= 0)."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the ``max_n``-th
    convergent of the continued fraction of e.

    e = [2; 1, 2, 1, 1, 4, 1, 1, 6, 1, ...], so the i-th partial quotient is
    2*i/3 when i is divisible by 3 and 1 otherwise.  Numerators follow the
    recurrence n_i = a_i * n_{i-1} + n_{i-2}.
    """
    pre_numerator = 1  # n_0
    cur_numerator = 2  # n_1 = 2, the leading term of e
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1  # i-th partial quotient
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 40
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """Return True if ``string`` contains no repeated letters, case-insensitively.

    Raises:
        ValueError: if the string contains non-alphabetic characters.
    """
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    # Compare the case-folded letters so that e.g. "Aa" is NOT an isogram.
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 40
| 1
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a ``FocalNetConfig`` for the given checkpoint name.

    Architecture hyper-parameters (depths, focal levels/windows, embed dim,
    conv embedding, post-layernorm, layerscale) and the ImageNet label
    mapping are all inferred from substrings of ``model_name``.
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # The large/huge variants use conv patch embedding, post-layernorm and layerscale.
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """Map a key from the original FocalNet state dict to its HF counterpart.

    Applies a sequence of substring rewrites; any key that is not the
    classification head gets the ``focalnet.`` prefix at the end.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        # Classification head keys keep no model prefix.
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def lowercase ( A_ , A_ , A_=False )-> Tuple:
    """Download a FocalNet checkpoint, convert it to HF format, verify the
    logits, and optionally save/push the converted model and processor.

    NOTE(review): this function is identifier-mangled — the three parameters
    are all named ``A_`` (a SyntaxError; originally model_name,
    pytorch_dump_folder_path, push_to_hub), every result is bound to the
    throwaway local ``a``, and the body reads the original names
    (model_name_to_url, state_dict, val, model, processor, inputs, outputs,
    expected_slice, ...) that are never bound. Reconstruct from the original
    conversion script before running.
    """
    # Mapping from supported model names to the official checkpoint URLs.
    a : Dict = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    a : List[str] = model_name_to_url[model_name]
    print("Checkpoint URL: " , A_ )
    a : str = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        a : int = state_dict.pop(A_ )
        a : Dict = val
    a : Optional[int] = get_focalnet_config(A_ )
    a : str = FocalNetForImageClassification(A_ )
    model.eval()
    # load state dict
    model.load_state_dict(A_ )
    # verify conversion
    a : Tuple = "http://images.cocodataset.org/val2017/000000039769.jpg"
    a : Any = BitImageProcessor(
        do_resize=A_ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=A_ , crop_size=224 , do_normalize=A_ , image_mean=A_ , image_std=A_ , )
    a : str = Image.open(requests.get(A_ , stream=A_ ).raw )
    a : Optional[int] = processor(images=A_ , return_tensors="pt" )
    # Reference torchvision transform used to cross-check the processor output.
    a : Dict = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
        ] )
    a : Optional[Any] = image_transforms(A_ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , A_ , atol=1e-4 )
    a : Optional[Any] = model(**A_ )
    a : Any = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # Per-model expected logit slices used to validate the conversion.
    if model_name == "focalnet-tiny":
        a : Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
    elif model_name == "focalnet-tiny-lrf":
        a : Union[str, Any] = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
    elif model_name == "focalnet-small":
        a : Union[str, Any] = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
    elif model_name == "focalnet-small-lrf":
        a : str = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
    elif model_name == "focalnet-base":
        a : Tuple = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
    elif model_name == "focalnet-base-lrf":
        a : Dict = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
    assert torch.allclose(outputs.logits[0, :3] , A_ , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(A_ )
        processor.save_pretrained(A_ )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the FocalNet checkpoint conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class _A ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    The builder below reads these fields via ``self.config.batch_size``,
    ``self.config.columns`` and ``self.config.features``, so each must be a
    distinctly-named dataclass field.
    """

    # Number of rows per Arrow record batch yielded while reading.
    batch_size: int = 10_000
    # Optional subset of columns to load; None loads every column.
    columns: Optional[List[str]] = None
    # Optional explicit features; inferred from the Parquet schema when None.
    features: Optional[datasets.Features] = None
class _A ( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that streams Parquet files into Arrow tables.

    NOTE(review): identifier-mangled — `ParquetConfig` below is not defined
    under that name in this file, results are bound to the throwaway local
    ``a`` while later code reads the original names (data_files, files,
    splits, parquet_file, pa_table, schema), and ``__UpperCAmelCase`` is used
    where those values were expected. Reconstruct from the original
    ``datasets/packaged_modules/parquet`` source before running.
    """

    # Originally ``BUILDER_CONFIG_CLASS = ParquetConfig``.
    UpperCAmelCase : str = ParquetConfig

    def __snake_case ( self : Tuple):
        # DatasetInfo exposing the (possibly None) user-provided features.
        return datasets.DatasetInfo(features=self.config.features)

    def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
        # Build split generators from self.config.data_files; a bare
        # str/list/tuple means a single unnamed TRAIN split.
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        a : str = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(__UpperCAmelCase , (str, list, tuple)):
            a : Dict = data_files
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        a : Dict = []
        for split_name, files in data_files.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer the features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(__UpperCAmelCase):
                    with open(__UpperCAmelCase , "rb") as f:
                        a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
                    break
            splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
        return splits

    def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
        # Cast a raw table to the declared features when they are known.
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
        return pa_table

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
        # Yield (key, table) pairs, one per record batch per file, validating
        # any user-selected column subset against the declared features first.
        a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
            with open(__UpperCAmelCase , "rb") as f:
                a : Tuple = pq.ParquetFile(__UpperCAmelCase)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        a : Optional[Any] = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
                    raise
| 40
| 1
|
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__lowercase = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _A ( nn.Module ):
    """ResNet-based image encoder producing N pooled 2048-d region embeddings.

    NOTE(review): identifier-mangled — ``__init__`` binds its results to the
    throwaway local ``a`` instead of attributes, then reads undefined names
    (``model``), and ``forward`` reads ``self.pool``/``self.model`` and
    ``out`` that are never assigned. Reconstruct attribute names from the
    original MMBT utils before running.
    """

    def __init__( self : Dict , __UpperCAmelCase : str):
        super().__init__()
        # Pretrained ResNet with its final avgpool + fc head stripped off.
        a : int = torchvision.models.resnetaaa(pretrained=__UpperCAmelCase)
        a : List[str] = list(model.children())[:-2]
        a : Optional[int] = nn.Sequential(*__UpperCAmelCase)
        # Pool spatial map down to args.num_image_embeds positions (see POOLING_BREAKDOWN).
        a : Union[str, Any] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds])

    def __snake_case ( self : str , __UpperCAmelCase : Tuple):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        a : List[Any] = self.pool(self.model(__UpperCAmelCase))
        a : Dict = torch.flatten(__UpperCAmelCase , start_dim=2)
        a : List[str] = out.transpose(1 , 2).contiguous()
        return out # BxNx2048
class _A ( _a ):
    """JSON-lines dataset pairing tokenized text with an image and a
    multi-hot genre label vector (MM-IMDB style).

    NOTE(review): identifier-mangled — ``_a`` is presumably the imported
    ``Dataset`` base; ``__init__`` binds to the throwaway local ``a`` instead
    of attributes (self.data, self.data_dir, self.tokenizer, ...), and
    ``__getitem__`` reads names (sentence, label, image, start_token,
    end_token) that are never bound. Reconstruct before running.
    """

    def __init__( self : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int]):
        # NOTE(review): duplicated parameter names are a SyntaxError —
        # originally (data_path, tokenizer, transforms, labels, max_seq_length).
        a : Dict = [json.loads(__UpperCAmelCase) for l in open(__UpperCAmelCase)]
        a : Any = os.path.dirname(__UpperCAmelCase)
        a : Optional[Any] = tokenizer
        a : Tuple = labels
        a : int = len(__UpperCAmelCase)
        a : Optional[Any] = max_seq_length
        a : Union[str, Any] = transforms

    def __len__( self : Optional[int]):
        return len(self.data)

    def __getitem__( self : Union[str, Any] , __UpperCAmelCase : Optional[int]):
        # Tokenize the text, truncate to max_seq_length, build the multi-hot
        # label vector, and load + transform the associated image.
        a : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=__UpperCAmelCase))
        a , a , a : Optional[int] = sentence[0], sentence[1:-1], sentence[-1]
        a : Optional[Any] = sentence[: self.max_seq_length]
        a : Optional[Any] = torch.zeros(self.n_classes)
        a : int = 1
        a : Union[str, Any] = Image.open(os.path.join(self.data_dir , self.data[index]["img"])).convert("RGB")
        a : str = self.transforms(__UpperCAmelCase)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def __snake_case ( self : Optional[int]):
        # Frequency of each label across the dataset (used for class weighting).
        a : int = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    """Collate MM-IMDB examples into padded batch tensors.

    Pads every ``sentence`` to the longest one in the batch (zero padding)
    and builds the matching attention mask; images, labels and the image
    start/end tokens are simply stacked.

    Returns:
        (text_tensor, mask_tensor, img_tensor, img_start_token,
         img_end_token, tgt_tensor)
    """
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(lens), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1  # 1 marks real (non-padding) tokens

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels() -> list:
    """Return the 23 MM-IMDB genre labels, in canonical order."""
    # (The previous `-> Dict` annotation referenced an un-imported name and
    # was wrong anyway: this returns a list of strings.)
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def lowercase ( )-> transforms.Compose:
    """Return the torchvision preprocessing pipeline for MMBT images:
    resize to 256, center-crop to 224, convert to tensor, and normalize
    with the hard-coded channel means/stds used when training the encoder.
    """
    return transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.4_6_7_7_7_0_4_4, 0.4_4_5_3_1_4_2_9, 0.4_0_6_6_1_0_1_7] , std=[0.1_2_2_2_1_9_9_4, 0.1_2_1_4_5_8_3_5, 0.1_4_3_8_0_4_6_9] , ),
        ] )
| 40
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (previously discarded: the next assignment rebound the
# same mangled name `__lowercase`, losing the logger).
logger = logging.get_logger(__name__)

# Canonical config-file URLs for the published DPR checkpoints.
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/dpr-ctx_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-single-nq-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-single-nq-base": (
        "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"
    ),
    "facebook/dpr-ctx_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-question_encoder-multiset-base": (
        "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"
    ),
    "facebook/dpr-reader-multiset-base": (
        "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"
    ),
}
class _A(PretrainedConfig):
    """Configuration for DPR (Dense Passage Retrieval) encoders and reader.

    Mirrors BERT's configuration, plus ``projection_dim`` for an optional
    projection layer on top of the encoder output (0 disables it).
    """

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 40
| 1
|
"""simple docstring"""
from math import factorial
def combinations(n: int, k: int) -> int:
    """Return C(n, k): the number of k-element subsets of an n-element set.

    Raises:
        ValueError: if k > n or k < 0.
    """
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
    # Demo output: three worked examples of n-choose-k.
    # NOTE(review): `combinations` must be defined at module level — the
    # function above was renamed by the obfuscation pass; confirm the name.
    print(
        """The number of five-card hands possible from a standard""",
        f'''fifty-two card deck is: {combinations(52, 5)}\n''',
    )

    print(
        """If a class of 40 students must be arranged into groups of""",
        f'''4 for group projects, there are {combinations(40, 4)} ways''',
        """to arrange them.\n""",
    )

    print(
        """If 10 teams are competing in a Formula One race, there""",
        f'''are {combinations(10, 3)} ways that first, second and''',
        """third place can be awarded.""",
    )
| 40
|
"""simple docstring"""
class _A :
    """Fenwick-tree-like structure for range-maximum queries over a fixed-size
    array (``update`` a position, ``query`` a half-open range maximum).

    NOTE(review): obfuscation artifacts throughout — the ``a : ... = ...``
    statements bind a throwaway local while later code reads ``self.size``,
    ``self.arr``, ``self.tree`` and locals such as ``current_left_border``,
    ``index``, ``value``, ``result``; the ``update``-style method also declares
    two parameters with the same placeholder name (a SyntaxError), and all
    methods share the name ``__snake_case`` so later definitions shadow earlier
    ones.  Original attribute/parameter/method names must be restored before
    this class can run.
    """

    def __init__( self : int , __UpperCAmelCase : int):
        # presumably: self.size = size; self.arr = [0]*size; self.tree = [0]*size
        a : Tuple = size
        a : Dict = [0] * size
        a : Optional[int] = [0] * size

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # presumably get_next: smallest index whose covered range extends past `index`
        return index | (index + 1)

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # presumably get_prev: end of the previous covered range
        return (index & (index + 1)) - 1

    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # presumably update(index, value): write value and propagate maxima upward
        a : Union[str, Any] = value
        while index < self.size:
            a : Dict = self.get_prev(__UpperCAmelCase) + 1
            if current_left_border == index:
                a : Optional[int] = value
            else:
                # NOTE(review): three placeholder args — original operands of
                # this max(...) are unrecoverable from the obfuscated text.
                a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.get_next(__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        # presumably query(left, right): maximum over [left, right)
        right -= 1  # Because of right is exclusive
        a : List[str] = 0
        while left <= right:
            a : Dict = self.get_prev(__UpperCAmelCase)
            if left <= current_left:
                a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
                a : Optional[Any] = current_left
            else:
                a : List[str] = max(__UpperCAmelCase , self.arr[right])
                right -= 1
        return result
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _A :
    """Test harness that builds tiny ConvNeXt-V2 configs and dummy image inputs
    and asserts output shapes for the base model, the image classifier and the
    backbone.

    NOTE(review): obfuscation artifacts — ``__init__`` declares every parameter
    with the same placeholder name (a SyntaxError), all methods share the name
    ``__snake_case`` (later definitions shadow earlier ones), and the
    ``a : ... = ...`` statements bind a throwaway local where the original
    assigned ``self.*`` attributes / named locals (the names on the right-hand
    side and the later ``self.*`` reads reveal the intended names).
    """
    def __init__( self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str]=13 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Dict=[10, 20, 30, 40] , __UpperCAmelCase : Any=[2, 2, 3, 2] , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : Optional[Any]=37 , __UpperCAmelCase : Any="gelu" , __UpperCAmelCase : Tuple=10 , __UpperCAmelCase : Optional[Any]=0.02 , __UpperCAmelCase : Optional[Any]=["stage2", "stage3", "stage4"] , __UpperCAmelCase : Optional[int]=[2, 3, 4] , __UpperCAmelCase : Any=None , ):
        # presumably: self.parent, self.batch_size, self.image_size, ... etc.
        a : Union[str, Any] = parent
        a : Optional[int] = batch_size
        a : str = image_size
        a : List[Any] = num_channels
        a : List[str] = num_stages
        a : Optional[int] = hidden_sizes
        a : List[Any] = depths
        a : int = is_training
        a : Optional[int] = use_labels
        a : str = intermediate_size
        a : str = hidden_act
        a : int = num_labels
        a : Tuple = initializer_range
        a : Optional[Any] = out_features
        a : List[Any] = out_indices
        a : Union[str, Any] = scope
    def __snake_case ( self : List[str]):
        # presumably prepare_config_and_inputs: random pixel values + optional labels
        a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        a : str = None
        if self.use_labels:
            a : Any = ids_tensor([self.batch_size] , self.num_labels)
        a : Optional[Any] = self.get_config()
        return config, pixel_values, labels
    def __snake_case ( self : int):
        # presumably get_config: tiny ConvNextV2Config from the stored knobs
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def __snake_case ( self : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : Dict):
        # presumably create_and_check_model: base model output shape check
        a : int = ConvNextVaModel(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : List[str] = model(__UpperCAmelCase)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def __snake_case ( self : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : int , __UpperCAmelCase : Dict):
        # presumably create_and_check_for_image_classification: logits shape check
        a : Optional[Any] = ConvNextVaForImageClassification(__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def __snake_case ( self : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str]):
        # presumably create_and_check_backbone: feature maps/channels, then the
        # out_features=None fallback (last stage only)
        a : Optional[int] = ConvNextVaBackbone(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : List[Any] = model(__UpperCAmelCase)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels) , len(config.out_features))
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        a : Any = None
        a : Optional[int] = ConvNextVaBackbone(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        a : Optional[Any] = model(__UpperCAmelCase)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps) , 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels) , 1)
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
    def __snake_case ( self : Union[str, Any]):
        # presumably prepare_config_and_inputs_for_common
        a : str = self.prepare_config_and_inputs()
        a , a , a : Any = config_and_inputs
        a : List[Any] = {"pixel_values": pixel_values}
        return config, inputs_dict
    def __snake_case ( self : Union[str, Any]):
        # presumably prepare_config_and_inputs_with_labels
        a : int = self.prepare_config_and_inputs()
        a , a , a : Optional[int] = config_and_inputs
        a : Optional[int] = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class _A ( _a ,_a ,unittest.TestCase ):
    """Model-level test suite for ConvNeXt-V2 (common ModelTester/Pipeline mixin
    checks plus ConvNeXt-specific hidden-state and backbone assertions).

    NOTE(review): obfuscation artifacts — all test methods share the name
    ``__snake_case`` (so in this class body later definitions shadow earlier
    ones), several locals are bound to a throwaway ``a`` while later lines read
    the intended names (``config``, ``inputs_dict``, ``model`` ...), and a few
    call sites pass the undefined placeholder ``__UpperCAmelCase`` where the
    imported MODEL_*_MAPPING_NAMES / loop variables were clearly intended.
    """
    # presumably all_model_classes / pipeline_model_mapping / fx- and
    # test-capability flags of the common tester mixins
    UpperCAmelCase : Tuple = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    UpperCAmelCase : Optional[int] = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    UpperCAmelCase : Optional[int] = False
    UpperCAmelCase : str = False
    UpperCAmelCase : Optional[int] = False
    UpperCAmelCase : List[str] = False
    UpperCAmelCase : Optional[Any] = False
    def __snake_case ( self : List[str]):
        # presumably setUp: build the model tester and a ConfigTester
        a : Tuple = ConvNextVaModelTester(self)
        a : str = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37)
    def __snake_case ( self : str):
        # Exercise the standard config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def __snake_case ( self : Any):
        return
    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def __snake_case ( self : Optional[int]):
        pass
    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def __snake_case ( self : int):
        pass
    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def __snake_case ( self : str):
        pass
    def __snake_case ( self : Optional[int]):
        # presumably test_training: full backward pass for trainable model classes
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            a , a : Dict = self.model_tester.prepare_config_and_inputs_with_labels()
            a : Tuple = True
            if model_class.__name__ in [
                *get_values(__UpperCAmelCase),
                *get_values(__UpperCAmelCase),
            ]:
                continue
            a : int = model_class(__UpperCAmelCase)
            model.to(__UpperCAmelCase)
            model.train()
            a : Dict = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
            a : Optional[int] = model(**__UpperCAmelCase).loss
            loss.backward()
    def __snake_case ( self : Dict):
        # presumably test_training_gradient_checkpointing
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
            a : Optional[int] = False
            a : Optional[Any] = True
            if (
                model_class.__name__
                in [*get_values(__UpperCAmelCase), *get_values(__UpperCAmelCase)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            a : Tuple = model_class(__UpperCAmelCase)
            model.to(__UpperCAmelCase)
            model.gradient_checkpointing_enable()
            model.train()
            a : Union[str, Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase)
            a : Optional[int] = model(**__UpperCAmelCase).loss
            loss.backward()
    def __snake_case ( self : Dict):
        # presumably test_forward_signature: first positional arg is pixel_values
        a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a : Any = model_class(__UpperCAmelCase)
            a : str = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            a : Any = [*signature.parameters.keys()]
            a : Tuple = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
    def __snake_case ( self : Dict):
        # presumably test_model
        a : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase)
    def __snake_case ( self : Optional[Any]):
        # presumably test_hidden_states_output: one hidden state per stage (+ stem)
        def check_hidden_states_output(__UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[int]):
            a : Optional[int] = model_class(__UpperCAmelCase)
            model.to(__UpperCAmelCase)
            model.eval()
            with torch.no_grad():
                a : Optional[Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
            a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            a : List[Any] = self.model_tester.num_stages
            self.assertEqual(len(__UpperCAmelCase) , expected_num_stages + 1)
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        a , a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            a : List[Any] = True
            check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            a : Dict = True
            check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
    def __snake_case ( self : Optional[int]):
        # presumably test_for_image_classification
        a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
    @slow
    def __snake_case ( self : Optional[Any]):
        # presumably test_model_from_pretrained (placeholder should be model_name)
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            a : Tuple = ConvNextVaModel.from_pretrained(__UpperCAmelCase)
            self.assertIsNotNone(__UpperCAmelCase)
def lowercase ( )-> "Image.Image":
    """Load the COCO test image (two cats) used by the integration tests below.

    NOTE(review): the obfuscated original bound the opened image to a throwaway
    local ``a`` and returned the undefined name ``image``; the return
    annotation ``List[str]`` was also wrong for an image loader.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
    """Slow integration test: runs the pretrained convnextv2-tiny classifier on
    the COCO cats image and checks the logits shape plus a slice of values.

    NOTE(review): obfuscation artifacts — locals are bound to a throwaway ``a``
    while later lines read ``model``/``inputs``/``outputs``, and several
    ``.to(__UpperCAmelCase)`` calls pass an undefined placeholder where
    ``torch_device`` was clearly intended.
    """
    @cached_property
    def __snake_case ( self : Union[str, Any]):
        # presumably default_image_processor
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None
    @slow
    def __snake_case ( self : int):
        # presumably test_inference_image_classification_head
        a : Optional[int] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(__UpperCAmelCase)
        a : Any = self.default_image_processor
        a : Union[str, Any] = prepare_img()
        a : List[str] = preprocessor(images=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase)
        # forward pass
        with torch.no_grad():
            a : Any = model(**__UpperCAmelCase)
        # verify the logits
        a : str = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
        a : Union[str, Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386]).to(__UpperCAmelCase)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4))
| 40
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
    """Unit tests for ``knapsack.knapsack`` (0/1 knapsack solver).

    NOTE(review): obfuscation artifacts — all three test methods share the name
    ``__snake_case`` (later definitions shadow earlier ones), and every call
    passes the undefined placeholder ``__UpperCAmelCase`` where the locals
    built just above (capacity, value list, weight list, item count) were
    clearly intended.  Original names/argument order must be restored from the
    ``knapsack`` module's signature before these tests can run.
    """
    def __snake_case ( self : List[Any]):
        # Base cases: zero capacity / trivial single-item lists yield value 0.
        a : str = 0
        a : Optional[int] = [0]
        a : Union[str, Any] = [0]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
        a : List[str] = [60]
        a : str = [10]
        a : Optional[int] = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
    def __snake_case ( self : Optional[int]):
        # Small case: capacity 3 over 3 items -> expected optimum 5.
        a : Any = 3
        a : str = [1, 2, 3]
        a : Tuple = [3, 2, 1]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5)
    def __snake_case ( self : Tuple):
        # Classic textbook case: capacity 50 -> expected optimum 220.
        a : int = 50
        a : List[Any] = [60, 100, 120]
        a : Optional[int] = [10, 20, 30]
        a : str = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220)
# Run the test suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| 40
| 1
|
"""simple docstring"""
from typing import Any
class Node:
    """A single element of the singly linked list.

    NOTE(review): the obfuscated original named both classes in this module
    ``_A`` and bound attribute values to a throwaway local ``a``; names are
    restored from how the module-level test/driver functions below use them
    (``Node``/``LinkedList``).
    """

    def __init__(self, data: Any) -> None:
        self.data = data  # payload stored in this node
        self.next = None  # reference to the following node (None at the tail)

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexing, insertion/deletion at arbitrary
    positions and in-place reversal."""

    def __init__(self) -> None:
        self.head = None  # first node, or None for an empty list

    def __iter__(self):
        """Yield the data of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes in the list."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """Render as 'a->b->c' (the empty list renders as '')."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        """Return the data at *index*; raise ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at *index*; raise ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append *data* at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend *data* at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert *data* before position *index* (0..len(self) inclusive)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        """Remove and return the first element."""
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        """Remove and return the last element."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        """Remove and return the element at *index*; raise IndexError when invalid."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        """True when the list has no nodes."""
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Put the last-visited node (old tail) at the head
        self.head = prev
def lowercase ( )-> None:
    """Exercise the LinkedList API end to end (insert/delete/index/reverse).

    NOTE(review): obfuscation artifacts — the ``a : ... = ...`` binding should
    assign the name ``linked_list`` read below, the ``A_`` placeholders stand
    for the value actually used at each call site, and this module defines
    three functions all named ``lowercase`` so only the last survives at
    module level.  It also references ``LinkedList``, which the class
    definitions above currently name ``_A``.
    """
    a : Any = LinkedList()
    assert linked_list.is_empty() is True
    assert str(A_ ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(A_ ) == i
        linked_list.insert_nth(A_ , i + 1 )
    assert str(A_ ) == "->".join(str(A_ ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(A_ ) == "->".join(str(A_ ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(A_ ) == 9
    assert str(A_ ) == "->".join(str(A_ ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        a : Optional[int] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(A_ ) == "->".join(str(A_ ) for i in range(-8 , 1 ) )
def lowercase ( )-> None:
    """Exercise the LinkedList with heterogeneous element types (ints, strings,
    floats, Node instances and None), checking string rendering after every
    head/tail/nth mutation and a final reversal.

    NOTE(review): obfuscation artifacts — ``a : ... = ...`` should bind the
    names read below (``test_input``, ``linked_list``, ``result``) and the
    ``A_`` placeholders stand for the values actually used at each call site.
    """
    a : str = [
        -9,
        100,
        Node(77_345_112 ),
        "dlrow olleH",
        7,
        5_555,
        0,
        -1_9_2.5_5_5_5_5,
        "Hello, world!",
        7_7.9,
        Node(10 ),
        None,
        None,
        1_2.2_0,
    ]
    a : str = LinkedList()
    for i in test_input:
        linked_list.insert_tail(A_ )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(A_ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    a : int = linked_list.delete_head()
    assert result == -9
    assert (
        str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    a : int = linked_list.delete_tail()
    assert result == 1_2.2
    assert (
        str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    a : Union[str, Any] = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(A_ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!" ) )
    assert (
        str(A_ )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(A_ )
    assert (
        str(A_ )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(A_ )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def lowercase ( )-> Optional[int]:
    """Interactive driver: run the doctests, then walk a LinkedList through
    head/tail insertions, deletions, reversal and index-based access using
    values typed by the user.

    NOTE(review): obfuscation artifacts — ``a : ... = ...`` should bind the
    names read below (``linked_list``) and the ``A_`` placeholders stand for
    the values actually printed/assigned at each call site.  This is the last
    of three functions named ``lowercase`` in this module, so it is the one
    visible at module level.
    """
    from doctest import testmod
    testmod()
    a : int = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head " ).strip() )
    linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
    linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nDelete head" )
    linked_list.delete_head()
    print("Delete tail" )
    linked_list.delete_tail()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nReverse linked list" )
    linked_list.reverse()
    print("\nPrint list:" )
    linked_list.print_list()
    print("\nString representation of linked list:" )
    print(A_ )
    print("\nReading/changing Node data using indexing:" )
    print(F'''Element at Position 1: {linked_list[1]}''' )
    a : Tuple = input("Enter New Value: " ).strip()
    print("New list:" )
    print(A_ )
    print(F'''length of linked_list is : {len(A_ )}''' )
if __name__ == "__main__":
    # NOTE(review): the original called ``main()``, which does not exist in
    # this module; the interactive driver defined immediately above (the final
    # ``lowercase`` definition, which shadows the earlier ones) is what was
    # intended.
    lowercase()
| 40
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """Tokenizer test suite for LayoutLM: writes a tiny WordPiece vocab to a
    temp dir and checks tokenization plus token-to-id conversion.

    NOTE(review): obfuscation artifacts — all methods share the name
    ``__snake_case`` (later definitions shadow earlier ones) and the
    ``a : ... = ...`` statements bind a throwaway local while later lines read
    the intended names (``vocab_tokens``, ``self.vocab_file``, ``tokenizer``,
    ``input_text``/``output_text``); original names must be restored before
    these tests can run.
    """
    # presumably tokenizer_class / rust_tokenizer_class / test flags
    UpperCAmelCase : str = LayoutLMTokenizer
    UpperCAmelCase : int = LayoutLMTokenizerFast
    UpperCAmelCase : Union[str, Any] = True
    UpperCAmelCase : Optional[Any] = True
    def __snake_case ( self : Optional[int]):
        # presumably setUp: write a minimal WordPiece vocabulary file
        super().setUp()
        a : Tuple = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
        # presumably get_tokenizer
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)
    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
        # presumably get_input_output_texts
        a : Tuple = "UNwant\u00E9d,running"
        a : Dict = "unwanted, running"
        return input_text, output_text
    def __snake_case ( self : Any):
        # presumably test_full_tokenizer: check sub-word split and ids
        a : List[Any] = self.tokenizer_class(self.vocab_file)
        a : str = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9])
    def __snake_case ( self : Dict):
        pass
| 40
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class _A ( _a ):
    """RoBERTa model configuration (BERT-style hyper-parameters).

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    with the same placeholder name (a SyntaxError) and bound each value to a
    throwaway local ``a``.  Parameter and attribute names below are restored
    from the names the body reads and the stock RoBERTa defaults.
    """

    # Presumably the ``model_type`` class attribute of the original config.
    UpperCAmelCase : str = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are consumed by the base config; everything else is
        # stored verbatim on the instance.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _A ( _a ):
    """ONNX export configuration for RoBERTa: declares the dynamic axes of the
    model inputs (batch/sequence, plus a choice axis for multiple-choice)."""

    @property
    def __snake_case ( self : int):
        # NOTE(review): presumably the ``inputs`` property of the original
        # OnnxConfig subclass — the obfuscated method name is kept.  The
        # obfuscated body bound the axis mapping to a throwaway local ``a``
        # while the OrderedDict below reads ``dynamic_axis``; restored here.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
| 40
|
"""simple docstring"""
def lowercase(num: int) -> str:
    """Convert an integer to its binary string representation.

    Examples: ``0 -> "0b0"``, ``5 -> "0b101"``, ``-5 -> "-0b101"``.

    Raises:
        TypeError: if ``num`` is a float or a string.

    NOTE(review): the obfuscated original checked ``isinstance(A_, A_)`` —
    the checks against ``float`` and ``str`` are restored here, and the
    locals are renamed back to the names the body reads (``num``, ``negative``,
    ``binary``).
    """
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        # Collect bits least-significant first, inserting at the front so the
        # final list reads most-significant first.
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class _A ( _a ):
    """UMT5 model configuration (T5-style encoder-decoder hyper-parameters).

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    with the same placeholder name (a SyntaxError), bound values to a throwaway
    local ``a``, and gave all three trailing properties the same mangled name
    (so only the last survived).  Names below are restored from what the body
    reads and the stock UMT5 defaults.
    """

    # Presumably ``model_type`` and ``keys_to_ignore_at_inference`` in the original.
    UpperCAmelCase : str = "umt5"
    UpperCAmelCase : Optional[Any] = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # Parse e.g. "gated-gelu" into the activation name and a gating flag.
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    # NOTE(review): property names restored — the obfuscated original defined
    # all three under one mangled name, so only the last was reachable.
    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers
class _A ( _a ):
    """ONNX export configuration for UMT5 (seq2seq with optional past key values).

    NOTE(review): the three properties of the obfuscated original all carried
    the same mangled name and bound values to a throwaway local; names are
    restored from the "Copied from ... T5OnnxConfig" comments and from what
    each body returns.
    """

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self):
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            # The attention mask covers the cached prefix plus the new tokens,
            # and the decoder feeds one token at a time.
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self):
        return 13

    @property
    def atol_for_validation(self):
        # Loose absolute tolerance used when validating the exported model.
        return 5e-4
| 40
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def lowercase ( A_ , A_ , A_ , A_ )-> Union[str, Any]:
'''simple docstring'''
a : Any = sorted(zip(A_ , A_ ) , key=lambda A_ : x[0] / x[1] , reverse=A_ )
a , a : int = [i[0] for i in r], [i[1] for i in r]
a : Union[str, Any] = list(accumulate(A_ ) )
a : Optional[Any] = bisect(A_ , A_ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class _A ( _a ):
    """Output of the UnCLIP scheduler step: the sample at the previous timestep
    and, optionally, the model's predicted denoised sample.

    NOTE(review): both fields of the obfuscated original shared one placeholder
    name, so the dataclass ended up with a single field; the field names are
    restored from ``DDPMSchedulerOutput``, which the comment above documents
    this class as copied from.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def lowercase(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes an alpha-bar (cumulative noise)
    function over ``num_diffusion_timesteps`` steps.

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: cap applied to each beta.
        alpha_transform_type: "cosine" or "exp" alpha-bar shape.

    Returns:
        A 1-D ``torch.float32`` tensor of betas.

    Raises:
        ValueError: for an unknown ``alpha_transform_type``.

    NOTE(review): the obfuscated original declared all three parameters with
    one shared placeholder name (a SyntaxError) and used the nonexistent dtype
    ``torch.floataa``; names and ``torch.float32`` restored.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''')
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), capped at max_beta.
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


# Backward/forward-compatible alias: the scheduler below calls this function by
# its original diffusers name.
betas_for_alpha_bar = lowercase
class _A ( _a ,_a ):
    """
    UnCLIP scheduler: a DDPM variant used by the unCLIP / Karlo pipelines.

    Only the "squaredcos_cap_v2" (squared-cosine) beta schedule is supported.
    The posterior variance is either fixed ("fixed_small_log") or predicted
    by the model ("learned_range").

    NOTE(review): the base classes are aliased ``_a`` in this file —
    presumably SchedulerMixin and ConfigMixin; confirm against the imports.
    The original block was syntactically invalid (every parameter was named
    ``__UpperCAmelCase`` and every method ``__snake_case``); names are
    restored from the diffusers ``scheduling_unclip.py`` API that the method
    bodies already reference.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        # `lowercase` is this module's betas_for_alpha_bar helper defined above.
        self.betas = lowercase(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op input scaling, kept for interchangeability with other schedulers."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Set the ``num_inference_steps`` evenly spaced, rounded timesteps used at inference."""
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        """Posterior variance at step ``t`` (formulas (6)/(7) of the DDPM paper)."""
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get
        # previous sample x_{t-1} ~ N(pred_prev_sample, variance)
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        """Predict the previous sample x_{t-1} from the model output (one reverse step)."""
        t = timestep

        # When the model learns the variance it returns twice as many channels.
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
                " for the UnCLIPScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device)

            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                    " for the UnCLIPScheduler.")

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        """Diffuse clean samples to the noise level of ``timesteps`` (q(x_t | x_0))."""
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        # Broadcast to the sample rank by appending singleton dims.
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 40
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowercase(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a polar force into Cartesian components ``[Fx, Fy]``.

    Args:
        magnitude: force magnitude.
        angle: direction of the force; degrees unless ``radian_mode`` is True.
        radian_mode: interpret ``angle`` as radians when True.

    The original block declared three parameters all named ``A_`` (a
    SyntaxError); canonical names are restored.
    """
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def lowercase(forces, location, eps: float = 10**-1) -> bool:
    """Return True when the system of 2D forces is in static equilibrium.

    Args:
        forces: (n, 2) array of force vectors.
        location: (n, 2) array of the points where each force acts.
        eps: tolerance on the net moment.

    Each per-force moment is the 2D cross product ``location_i x force_i``;
    equilibrium means their sum is ~0 within ``eps``. The original block
    declared three parameters all named ``A_`` (a SyntaxError).
    """
    moments = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): the names in this script appear machine-mangled — every
    # assignment targets `__lowercase` while the asserts read `forces` /
    # `location`, and `polar_force` / `in_static_equilibrium` are both named
    # `lowercase` above; as written this block raises NameError.
    __lowercase = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __lowercase = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    # Run the module's doctests as a final sanity check.
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class _A :
"""simple docstring"""
def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
if dst_width < 0 or dst_height < 0:
raise ValueError("Destination width/height should be > 0")
a : Union[str, Any] = img
a : Optional[int] = img.shape[1]
a : Dict = img.shape[0]
a : Dict = dst_width
a : Any = dst_height
a : str = self.src_w / self.dst_w
a : Dict = self.src_h / self.dst_h
a : Dict = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 255
)
def __snake_case ( self : Tuple):
for i in range(self.dst_h):
for j in range(self.dst_w):
a : List[str] = self.img[self.get_y(__UpperCAmelCase)][self.get_x(__UpperCAmelCase)]
def __snake_case ( self : Dict , __UpperCAmelCase : int):
return int(self.ratio_x * x)
def __snake_case ( self : Optional[int] , __UpperCAmelCase : int):
return int(self.ratio_y * y)
if __name__ == "__main__":
    # Demo: resize the Lena test image to 800x600 and display it with OpenCV.
    # NOTE(review): names appear machine-mangled — the assignments target
    # `__lowercase` but later lines read `dst_w`, `dst_h`, `im`, `n`, and the
    # class above is named `_A`, not `NearestNeighbour`; as written this
    # block raises NameError.
    __lowercase , __lowercase = 800, 600
    __lowercase = imread("""image_data/lena.jpg""", 1)
    __lowercase = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
    )
    waitKey(0)
    destroyAllWindows()
| 40
|
"""simple docstring"""
def lowercase(mass: float, velocity: float) -> float:
    """Kinetic energy ``0.5 * m * |v|**2`` of a body.

    Args:
        mass: mass of the body (must be non-negative).
        velocity: velocity of the body (sign is irrelevant).

    Raises:
        ValueError: if ``mass`` is negative.

    The original block declared both parameters as ``A_`` (a SyntaxError).
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
    # Run the module's doctests verbosely when executed as a script.
    import doctest
    doctest.testmod(verbose=True)
| 40
| 1
|
"""simple docstring"""
from string import ascii_uppercase
# Map "10".."35" -> "A".."Z": letter digits for bases above 10 (up to 36).
# NOTE(review): bound to the mangled name `__lowercase`, but the converter
# below reads `ALPHABET_VALUES` — presumably the original name.
__lowercase = {str(ord(c) - 55): c for c in ascii_uppercase}
def lowercase(num: int, base: int) -> str:
    """Convert a non-negative decimal integer to its string form in ``base``.

    Digits above 9 use the letters from ``ALPHABET_VALUES`` ("A".."Z"), so
    bases 2..36 are supported.

    Raises:
        TypeError: if ``num`` is a float, or ``base`` is a str/float.
        ValueError: if ``num`` is negative or ``base`` is outside 2..36.

    The original block declared both parameters as ``A_`` (a SyntaxError) and
    read locals (``betas``-style) that were never bound; names restored.
    """
    if isinstance(num, float):
        # Mirrors CPython's int("x", base) message for a bad operand type.
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # Letter digit, e.g. 10 -> "A".
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Round-trip sanity check: converting to every base and back via int()
    # must be lossless. NOTE(review): the original called `decimal_to_any`,
    # a name not defined in this file — the converter above is `lowercase`.
    for base in range(2, 37):
        for num in range(1000):
            assert int(lowercase(num, base), base) == num, (
                num,
                base,
                lowercase(num, base),
                int(lowercase(num, base), base),
            )
| 40
|
"""simple docstring"""
import os
import sys
import unittest
# Locate the repository root (three levels up from this test file) and make
# the repo's `utils` directory importable so `check_dummies` can be loaded.
# NOTE(review): both assignments target the mangled name `__lowercase`, yet
# the lines below read `git_repo_path` — as written this raises NameError.
__lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowercase = os.path.join(git_repo_path, """src""", """diffusers""")
class _A ( unittest.TestCase ):
    """Tests for the repo's `check_dummies` utility, which generates dummy
    placeholder objects for optional backends (torch, flax, onnx, ...).

    NOTE(review): local names look machine-mangled — results are bound to
    `a` while the assertions read `__UpperCAmelCase` / `objects` /
    `dummy_files`, and every method is named `__snake_case` (so later defs
    shadow earlier ones). Code kept byte-identical; only comments added.
    """
    def __snake_case ( self : Any):
        # find_backend should extract the backend name from an availability check.
        a : List[Any] = find_backend(" if not is_torch_available():")
        self.assertEqual(__UpperCAmelCase , "torch")
        # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")
        # Compound checks join the backends with "_and_".
        a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(__UpperCAmelCase , "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
        a : int = find_backend(
            " if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
        self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx")
    def __snake_case ( self : Union[str, Any]):
        # read_init should return a mapping backend -> dummy object names.
        a : Dict = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch" , __UpperCAmelCase)
        self.assertIn("torch_and_transformers" , __UpperCAmelCase)
        self.assertIn("flax_and_transformers" , __UpperCAmelCase)
        self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase)
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel" , objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel" , objects["flax"])
        self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"])
    def __snake_case ( self : Tuple):
        # create_dummy_object emits a stub constant / function / class.
        a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'")
        self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n")
        a : Dict = create_dummy_object("function" , "'torch'")
        self.assertEqual(
            __UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")
        a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
        a : int = create_dummy_object("FakeClass" , "'torch'")
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
    def __snake_case ( self : List[str]):
        # create_dummy_files should emit a complete dummy module per backend.
        a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
        a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
| 40
| 1
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
# Module logger.
__lowercase = logging.get_logger(__name__)
# Names of the vocabulary/tokenizer files inside a checkpoint directory.
# NOTE(review): every constant below is bound to the same mangled name
# `__lowercase`, so each assignment clobbers the previous one; presumably
# these were VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / PRETRAINED_INIT_CONFIGURATION
# (the names the tokenizer class below reads) — confirm before relying on them.
__lowercase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Download URLs for the vocab and pre-built tokenizer of each public checkpoint.
__lowercase = {
    """vocab_file""": {
        """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
        """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
        """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
        """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
        """bert-base-multilingual-uncased""": (
            """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
        ),
        """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
        """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
        """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
        """bert-large-uncased-whole-word-masking""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
        ),
        """bert-large-cased-whole-word-masking""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
        ),
        """bert-large-uncased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
        ),
        """bert-large-cased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
        ),
        """bert-base-cased-finetuned-mrpc""": (
            """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
        ),
        """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
        """bert-base-german-dbmdz-uncased""": (
            """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
        ),
        """TurkuNLP/bert-base-finnish-cased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
        ),
        """TurkuNLP/bert-base-finnish-uncased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
        ),
        """wietsedv/bert-base-dutch-cased""": (
            """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
        """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
        """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
        """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
        """bert-base-multilingual-uncased""": (
            """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
        ),
        """bert-base-multilingual-cased""": (
            """https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
        ),
        """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
        """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
        """bert-large-uncased-whole-word-masking""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
        ),
        """bert-large-cased-whole-word-masking""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
        ),
        """bert-large-uncased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
        ),
        """bert-large-cased-whole-word-masking-finetuned-squad""": (
            """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
        ),
        """bert-base-cased-finetuned-mrpc""": (
            """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
        ),
        """bert-base-german-dbmdz-cased""": (
            """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
        ),
        """bert-base-german-dbmdz-uncased""": (
            """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
        ),
        """TurkuNLP/bert-base-finnish-cased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
        ),
        """TurkuNLP/bert-base-finnish-uncased-v1""": (
            """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
        ),
        """wietsedv/bert-base-dutch-cased""": (
            """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
        ),
    },
}
# Maximum input length (in tokens) for each checkpoint.
__lowercase = {
    """bert-base-uncased""": 512,
    """bert-large-uncased""": 512,
    """bert-base-cased""": 512,
    """bert-large-cased""": 512,
    """bert-base-multilingual-uncased""": 512,
    """bert-base-multilingual-cased""": 512,
    """bert-base-chinese""": 512,
    """bert-base-german-cased""": 512,
    """bert-large-uncased-whole-word-masking""": 512,
    """bert-large-cased-whole-word-masking""": 512,
    """bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
    """bert-large-cased-whole-word-masking-finetuned-squad""": 512,
    """bert-base-cased-finetuned-mrpc""": 512,
    """bert-base-german-dbmdz-cased""": 512,
    """bert-base-german-dbmdz-uncased""": 512,
    """TurkuNLP/bert-base-finnish-cased-v1""": 512,
    """TurkuNLP/bert-base-finnish-uncased-v1""": 512,
    """wietsedv/bert-base-dutch-cased""": 512,
}
# Per-checkpoint tokenizer init overrides (casing behaviour).
__lowercase = {
    """bert-base-uncased""": {"""do_lower_case""": True},
    """bert-large-uncased""": {"""do_lower_case""": True},
    """bert-base-cased""": {"""do_lower_case""": False},
    """bert-large-cased""": {"""do_lower_case""": False},
    """bert-base-multilingual-uncased""": {"""do_lower_case""": True},
    """bert-base-multilingual-cased""": {"""do_lower_case""": False},
    """bert-base-chinese""": {"""do_lower_case""": False},
    """bert-base-german-cased""": {"""do_lower_case""": False},
    """bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
    """bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
    """bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
    """bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
    """bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
    """bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
    """bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
    """TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
    """TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
    """wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class _A ( _a ):
    """
    "Fast" BERT tokenizer backed by HuggingFace *tokenizers* (WordPiece).
    Mirrors the slow ``BertTokenizer`` API.

    NOTE(review): the base class is aliased ``_a`` in this file — presumably
    ``PreTrainedTokenizerFast``; confirm against the imports. The original
    block was syntactically invalid (all ``__init__`` parameters named
    ``__UpperCAmelCase``, all class attributes named ``UpperCAmelCase``, all
    methods named ``__snake_case``); canonical names are restored from the
    identifiers the method bodies already reference.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if the saved tokenizer.json disagrees
        # with the arguments passed here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (followed by B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for sentence A (incl. specials), 1 for sentence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the WordPiece vocabulary via the backend tokenizer model."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 40
|
"""simple docstring"""
# Pinned dependency table: package name -> pip requirement specifier.
# (Autogenerated in transformers by `make deps_table_update` from setup.py;
# here bound to the mangled name `__lowercase` — presumably `deps` originally.)
__lowercase = {
    """Pillow""": """Pillow<10.0.0""",
    """accelerate""": """accelerate>=0.20.3""",
    """av""": """av==9.2.0""",
    """beautifulsoup4""": """beautifulsoup4""",
    """black""": """black~=23.1""",
    """codecarbon""": """codecarbon==1.2.0""",
    """cookiecutter""": """cookiecutter==1.7.3""",
    """dataclasses""": """dataclasses""",
    """datasets""": """datasets!=2.5.0""",
    """decord""": """decord==0.6.0""",
    """deepspeed""": """deepspeed>=0.9.3""",
    """diffusers""": """diffusers""",
    """dill""": """dill<0.3.5""",
    """evaluate""": """evaluate>=0.2.0""",
    """fairscale""": """fairscale>0.3""",
    """faiss-cpu""": """faiss-cpu""",
    """fastapi""": """fastapi""",
    """filelock""": """filelock""",
    """flax""": """flax>=0.4.1,<=0.7.0""",
    """ftfy""": """ftfy""",
    """fugashi""": """fugashi>=1.0""",
    """GitPython""": """GitPython<3.1.19""",
    """hf-doc-builder""": """hf-doc-builder>=0.3.0""",
    """huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
    """importlib_metadata""": """importlib_metadata""",
    """ipadic""": """ipadic>=1.0.0,<2.0""",
    """isort""": """isort>=5.5.4""",
    """jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
    """jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
    """jieba""": """jieba""",
    """kenlm""": """kenlm""",
    """keras-nlp""": """keras-nlp>=0.3.1""",
    """librosa""": """librosa""",
    """nltk""": """nltk""",
    """natten""": """natten>=0.14.6""",
    """numpy""": """numpy>=1.17""",
    """onnxconverter-common""": """onnxconverter-common""",
    """onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
    """onnxruntime""": """onnxruntime>=1.4.0""",
    """opencv-python""": """opencv-python""",
    """optuna""": """optuna""",
    """optax""": """optax>=0.0.8,<=0.1.4""",
    """packaging""": """packaging>=20.0""",
    """parameterized""": """parameterized""",
    """phonemizer""": """phonemizer""",
    """protobuf""": """protobuf""",
    """psutil""": """psutil""",
    """pyyaml""": """pyyaml>=5.1""",
    """pydantic""": """pydantic<2""",
    """pytest""": """pytest>=7.2.0""",
    """pytest-timeout""": """pytest-timeout""",
    """pytest-xdist""": """pytest-xdist""",
    """python""": """python>=3.8.0""",
    """ray[tune]""": """ray[tune]""",
    """regex""": """regex!=2019.12.17""",
    """requests""": """requests""",
    """rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
    """rjieba""": """rjieba""",
    """rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
    """ruff""": """ruff>=0.0.241,<=0.0.259""",
    """sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
    """sacremoses""": """sacremoses""",
    """safetensors""": """safetensors>=0.3.1""",
    """sagemaker""": """sagemaker>=2.31.0""",
    """scikit-learn""": """scikit-learn""",
    """sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
    """sigopt""": """sigopt""",
    """starlette""": """starlette""",
    """sudachipy""": """sudachipy>=0.6.6""",
    """sudachidict_core""": """sudachidict_core>=20220729""",
    """tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
    """tensorflow""": """tensorflow>=2.6,<2.14""",
    """tensorflow-text""": """tensorflow-text<2.14""",
    """tf2onnx""": """tf2onnx""",
    """timeout-decorator""": """timeout-decorator""",
    """timm""": """timm""",
    """tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
    """torch""": """torch>=1.9,!=1.12.0""",
    """torchaudio""": """torchaudio""",
    """torchvision""": """torchvision""",
    """pyctcdecode""": """pyctcdecode>=0.4.0""",
    """tqdm""": """tqdm>=4.27""",
    """unidic""": """unidic>=1.0.2""",
    """unidic_lite""": """unidic_lite>=1.0.7""",
    """urllib3""": """urllib3<2.0.0""",
    """uvicorn""": """uvicorn""",
}
| 40
| 1
|
"""simple docstring"""
from __future__ import annotations
def lowercase(v: list[int], l: int, r: int, key: int) -> int:  # noqa: E741
    """Binary search: smallest index in ``(l, r]`` whose value is >= ``key``.

    ``v`` must be sorted ascending; ``l`` is an exclusive lower bound and
    ``r`` an inclusive upper bound for which ``v[r] >= key`` already holds.

    The original block declared all four parameters as ``A_`` (a SyntaxError).
    """
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def lowercase(v: list[int]) -> int:
    """Length of the longest strictly increasing subsequence of ``v``.

    O(n log n) "patience sorting" tails method: ``tail[k]`` holds the
    smallest possible tail value of an increasing subsequence of length
    ``k + 1``, so ``tail`` stays sorted and can be binary-searched.

    The original block read names that were never bound (``v``, ``tail``)
    and had lost the assignment target in the replace branch; the helper
    binary search is inlined because the module-level one is shadowed.
    """
    if len(v) == 0:
        return 0

    def _first_at_least(tail: list[int], lo: int, hi: int, key: int) -> int:
        # Smallest index in (lo, hi] with tail[index] >= key (tail is sorted).
        while hi - lo > 1:
            mid = (lo + hi) // 2
            if tail[mid] >= key:
                hi = mid
            else:
                lo = mid
        return hi

    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # New smallest value: best start for a length-1 subsequence.
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # Extends the longest subsequence found so far.
            tail[length] = v[i]
            length += 1
        else:
            # Replace the first tail >= v[i] to keep tails minimal.
            tail[_first_at_least(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the BERT subpackage: maps submodule name -> public
# names, extended below only when each optional backend is importable.
# NOTE(review): the dump binds everything to the mangled name `__lowercase` —
# presumably `_import_structure` (which the final _LazyModule call reads) and
# `_import_structure["..."] = [...]` entries originally; as written each
# assignment clobbers the previous one. Code kept byte-identical.
__lowercase = {
    """configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
    """tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
# Fast tokenizer: only when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ["""BertTokenizerFast"""]
# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """BertForMaskedLM""",
        """BertForMultipleChoice""",
        """BertForNextSentencePrediction""",
        """BertForPreTraining""",
        """BertForQuestionAnswering""",
        """BertForSequenceClassification""",
        """BertForTokenClassification""",
        """BertLayer""",
        """BertLMHeadModel""",
        """BertModel""",
        """BertPreTrainedModel""",
        """load_tf_weights_in_bert""",
    ]
# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFBertEmbeddings""",
        """TFBertForMaskedLM""",
        """TFBertForMultipleChoice""",
        """TFBertForNextSentencePrediction""",
        """TFBertForPreTraining""",
        """TFBertForQuestionAnswering""",
        """TFBertForSequenceClassification""",
        """TFBertForTokenClassification""",
        """TFBertLMHeadModel""",
        """TFBertMainLayer""",
        """TFBertModel""",
        """TFBertPreTrainedModel""",
    ]
# In-graph TF tokenizer: needs tensorflow-text.
try:
    if not is_tensorflow_text_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = ["""TFBertTokenizer"""]
# Flax models.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowercase = [
        """FlaxBertForCausalLM""",
        """FlaxBertForMaskedLM""",
        """FlaxBertForMultipleChoice""",
        """FlaxBertForNextSentencePrediction""",
        """FlaxBertForPreTraining""",
        """FlaxBertForQuestionAnswering""",
        """FlaxBertForSequenceClassification""",
        """FlaxBertForTokenClassification""",
        """FlaxBertModel""",
        """FlaxBertPreTrainedModel""",
    ]
# For static type checkers: eagerly import everything listed above.
if TYPE_CHECKING:
    from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
    from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_fast import BertTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bert import (
            BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BertForMaskedLM,
            BertForMultipleChoice,
            BertForNextSentencePrediction,
            BertForPreTraining,
            BertForQuestionAnswering,
            BertForSequenceClassification,
            BertForTokenClassification,
            BertLayer,
            BertLMHeadModel,
            BertModel,
            BertPreTrainedModel,
            load_tf_weights_in_bert,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_bert import (
            TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBertEmbeddings,
            TFBertForMaskedLM,
            TFBertForMultipleChoice,
            TFBertForNextSentencePrediction,
            TFBertForPreTraining,
            TFBertForQuestionAnswering,
            TFBertForSequenceClassification,
            TFBertForTokenClassification,
            TFBertLMHeadModel,
            TFBertMainLayer,
            TFBertModel,
            TFBertPreTrainedModel,
        )
    try:
        if not is_tensorflow_text_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bert_tf import TFBertTokenizer
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_bert import (
            FlaxBertForCausalLM,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForNextSentencePrediction,
            FlaxBertForPreTraining,
            FlaxBertForQuestionAnswering,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertModel,
            FlaxBertPreTrainedModel,
        )
# At runtime: replace this module with a lazy proxy that imports on access.
else:
    import sys
    __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
from timeit import timeit
# Expected results: string -> whether it is a palindrome.
# NOTE(review): bound to the mangled name `__lowercase`, but the assert below
# (and the benchmark code) reads `test_data` — these names have diverged.
__lowercase = {
    """MALAYALAM""": True,
    """String""": False,
    """rotor""": True,
    """level""": True,
    """A""": True,
    """BB""": True,
    """ABC""": False,
    """amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def lowercase(s: str) -> bool:
    """Palindrome check with a two-pointer scan from both ends.

    The original block's parameter was named ``A_`` while the body read the
    unbound name ``s``; the canonical name is restored.
    """
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def lowercase(s: str) -> bool:
    """Palindrome check by comparing ``s[i]`` to ``s[n - i - 1]``.

    We only need to traverse the first half of the string: the i-th last
    element is reachable from the i-th index (i == n - i - 1 at the middle).
    The original block's parameter was named ``A_`` while the body read the
    unbound name ``s``.
    """
    end = len(s) // 2
    n = len(s)
    return all(s[i] == s[n - i - 1] for i in range(end))
def lowercase ( A_ )-> bool:
    """Recursively check whether *A_* is a palindrome.

    Fixes:
    - The base case was ``len(s) <= 2``, which wrongly classified two-char
      non-palindromes ("ab") — and, via recursion, strings like "abca" —
      as palindromes. A length-2 string must still compare its ends, so the
      base case is ``len(s) <= 1``.
    - The recursive call targeted the undefined name
      ``is_palindrome_recursive``; it now recurses via this function.
    """
    if len(A_) <= 1:
        return True
    if A_[0] == A_[-1]:
        # Ends match: the answer is decided by the interior substring.
        return lowercase(A_[1:-1])
    return False
def lowercase ( A_ )-> bool:
    """Return True if the string equals its own reverse (slice-based check)."""
    reversed_copy = A_[::-1]
    return A_ == reversed_copy
def lowercase ( A_ )-> None:
    """Time the named palindrome checker against every entry of test_data.

    *A_* is the checker's name as a string; it is resolved via an import
    from ``__main__`` inside the timeit setup.
    """
    stmt = f'''all({A_}(key) is value for key, value in test_data.items())'''
    setup = f'''from __main__ import test_data, {A_}'''
    runs = 500_000
    elapsed = timeit(stmt=stmt , setup=setup , number=runs)
    print(f'''{A_:<35} finished {runs:,} runs in {elapsed:.5f} seconds''')
if __name__ == "__main__":
    # Cross-check all implementations against each other, then benchmark.
    # NOTE(review): `is_palindrome`, `is_palindrome_recursive`,
    # `is_palindrome_slice`, `is_palindrome_traversal` and
    # `benchmark_function` are not defined above (every helper here is
    # named `lowercase`) — confirm the intended bindings.
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f'''{key:21} {value}''')
    print("""a man a plan a canal panama""")
    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("""is_palindrome_slice""")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("""is_palindrome""")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("""is_palindrome_recursive""")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("""is_palindrome_traversal""")
| 40
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _A ( _a ):
    """Tool that answers a free-form question about a document image.

    Wraps the Donut vision encoder-decoder fine-tuned on DocVQA:
    ``encode`` builds the task prompt and pixel values, ``forward`` runs
    constrained generation, and ``decode`` parses the answer out of the
    generated token sequence.

    Fixes: the three methods all shared the name ``__snake_case`` (so only
    the last survived), ``*args``/``**kwargs`` shared one duplicated
    parameter name (a SyntaxError), the class attributes all shadowed one
    another under ``UpperCAmelCase``, and the processor method is
    ``token2json``, not ``tokenajson``.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # Pillow is needed to hand images to the processor.
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build decoder prompt ids and pixel values for the document/question pair."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Generate the answer tokens (greedy, num_beams=1, caching enabled)."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and the task prompt, then return the parsed answer."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 40
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase = logging.get_logger(__name__)
class _A ( _a ,_a ):
    """MaskFormer-Swin backbone configuration.

    Stores the hyper-parameters of a Swin Transformer used as the
    MaskFormer backbone.

    Fixes: ``__init__`` previously declared one duplicated parameter name
    eighteen times (a SyntaxError) and bound every value to a throwaway
    local ``a`` — so no setting was ever stored on the instance — and the
    final tuple unpack assigned both results to the same local. Values are
    now kept on ``self`` and the aligned out_features/out_indices are
    stored as ``self._out_features`` / ``self._out_indices``.
    """

    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f'''stage{idx}''' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 40
|
"""simple docstring"""
from __future__ import annotations
class _A :
"""simple docstring"""
def __init__( self : List[str] , __UpperCAmelCase : int = 0):
a : Tuple = key
def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__UpperCAmelCase) ^ key) for ch in content]
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Optional[Any] = key or self.__key or 1
# make sure key is an appropriate size
key %= 255
return [chr(ord(__UpperCAmelCase) ^ key) for ch in content]
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : List[Any] = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
a : Any = ""
for ch in content:
ans += chr(ord(__UpperCAmelCase) ^ key)
return ans
def __snake_case ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
a : Dict = key or self.__key or 1
# make sure key can be any size
while key > 255:
key -= 255
# This will be returned
a : str = ""
for ch in content:
ans += chr(ord(__UpperCAmelCase) ^ key)
return ans
def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : int = 0):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
try:
with open(__UpperCAmelCase) as fin, open("encrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(__UpperCAmelCase , __UpperCAmelCase))
except OSError:
return False
return True
def __snake_case ( self : Any , __UpperCAmelCase : str , __UpperCAmelCase : int):
assert isinstance(__UpperCAmelCase , __UpperCAmelCase) and isinstance(__UpperCAmelCase , __UpperCAmelCase)
try:
with open(__UpperCAmelCase) as fin, open("decrypt.out" , "w+") as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(__UpperCAmelCase , __UpperCAmelCase))
except OSError:
return False
return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 1
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase ( A_ )-> int:
    """Return the number of trainable parameters of torch module *A_*.

    Fixes: the filter lambda declared its parameter as ``A_`` while its body
    read ``p`` (a NameError once iterated); also sums a generator instead of
    materialising an intermediate list.
    """
    model_parameters = filter(lambda p: p.requires_grad , A_.parameters() )
    return sum(int(np.prod(p.size() )) for p in model_parameters )
__lowercase = logging.getLogger(__name__)
def lowercase ( output_dir , metric ):
    """Build a ModelCheckpoint callback that tracks ``val_{metric}`` (max mode).

    Fixes: the two parameters shared one duplicated name (a SyntaxError) and
    the chosen filename pattern / dirpath were bound to throwaway locals
    instead of being passed to ModelCheckpoint.

    Raises NotImplementedError for metrics other than rouge2/bleu/em/loss.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'''val_{metric}''' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def lowercase ( metric , patience ):
    """Build an EarlyStopping callback on ``val_{metric}``.

    Minimises when the metric name contains "loss", maximises otherwise.
    Fix: both parameters previously shared one duplicated name (a
    SyntaxError) and ``verbose`` pointed at an undefined value.
    """
    return EarlyStopping(
        monitor=f'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class _A ( pl.Callback ):
    """PyTorch Lightning callback that logs learning rates, writes per-split
    result/generation files, and records parameter counts.

    NOTE(review): the method parameters are scrambled to ``__UpperCAmelCase``
    while the bodies read names like ``pl_module``/``trainer``/``metrics``/
    ``type_path`` — confirm the intended signatures.
    """
    def __snake_case ( self : str , __UpperCAmelCase : List[str] , __UpperCAmelCase : int):
        # Log the current learning rate of every optimizer param group.
        a : Optional[Any] = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(__UpperCAmelCase)
    @rank_zero_only
    def __snake_case ( self : Tuple , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule , __UpperCAmelCase : str , __UpperCAmelCase : Any=True):
        # Write callback metrics to a per-split results file and dump the
        # "preds" entries to a generations file (only rank zero).
        logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''')
        a : Dict = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        a : Optional[Any] = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            a : str = od / "test_results.txt"
            a : str = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            a : str = od / f'''{type_path}_results/{trainer.global_step:05d}.txt'''
            a : List[Any] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt'''
        results_file.parent.mkdir(exist_ok=__UpperCAmelCase)
        generations_file.parent.mkdir(exist_ok=__UpperCAmelCase)
        with open(__UpperCAmelCase , "a+") as writer:
            for key in sorted(__UpperCAmelCase):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                a : List[str] = metrics[key]
                # Tensors are unwrapped to plain Python scalars before writing.
                if isinstance(__UpperCAmelCase , torch.Tensor):
                    a : str = val.item()
                a : Dict = f'''{key}: {val:.6f}\n'''
                writer.write(__UpperCAmelCase)
        if not save_generations:
            return
        if "preds" in metrics:
            a : Any = "\n".join(metrics["preds"])
            generations_file.open("w+").write(__UpperCAmelCase)
    @rank_zero_only
    def __snake_case ( self : Any , __UpperCAmelCase : Dict , __UpperCAmelCase : Dict):
        # Log total and trainable parameter counts (in millions).
        try:
            a : Optional[Any] = pl_module.model.model.num_parameters()
        except AttributeError:
            a : Any = pl_module.model.num_parameters()
        a : Dict = count_trainable_parameters(__UpperCAmelCase)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def __snake_case ( self : str , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : pl.LightningModule):
        # On test end: persist metrics json and write the "test" logs.
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        return self._write_logs(__UpperCAmelCase , __UpperCAmelCase , "test")
    @rank_zero_only
    def __snake_case ( self : Dict , __UpperCAmelCase : pl.Trainer , __UpperCAmelCase : Union[str, Any]):
        # On validation end: persist metrics json only.
        save_json(pl_module.metrics , pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 40
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase ( monkeypatch )-> None:
    """Reset the set of already-emitted deprecation warnings between tests.

    Fixes: the parameter was named ``A_`` while the body used pytest's
    ``monkeypatch`` fixture (which pytest injects by parameter name), and
    the ``-> List[Any]`` annotation referenced an unimported name.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def lowercase ( monkeypatch )-> None:
    """Patch ``datasets.inspect.huggingface_hub`` with a hub mock listing a
    few fake metrics.

    Fixes: the fixture parameter was named ``A_`` while the body used
    ``monkeypatch``; the two nested classes were both named ``_A`` although
    the code consumes them as ``MetricMock``/``HfhMock``; the metric id was
    bound to a throwaway local instead of the instance; and the list
    comprehension constructed ``MetricMock(_a)`` from an undefined name.
    """
    class MetricMock:
        def __init__(self, metric_id):
            # Store the id the mock hub will report for this metric.
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
    "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def lowercase ( func , args , mock_emitted_deprecation_warnings , mock_hfh , tmp_path ):
    """Each deprecated metrics entry point should emit a FutureWarning
    pointing at the evaluate library.

    Fixes: all five parameters shared one duplicated name (a SyntaxError)
    and ``pytest.warns`` was called with an undefined warning class instead
    of ``FutureWarning``. NOTE(review): the two mock-fixture parameter
    names are inferred from the fixtures above — confirm against conftest.
    """
    # The "tmp_path" placeholder in the parametrized args is substituted with
    # the real pytest tmp_path fixture at run time.
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
    with pytest.warns(FutureWarning , match="https://huggingface.co/docs/evaluate" ):
        func(*args )
| 40
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _A ( metaclass=_a ):
    """Dummy placeholder object that raises a helpful error whenever it is
    constructed or loaded while the ``torch``/``torchsde`` backends are
    missing.

    Fixes: ``*args``/``**kwargs`` shared one duplicated parameter name (a
    SyntaxError), both classmethods were named ``__snake_case`` (the second
    shadowed the first), and the backend list attribute — which the
    DummyObject metaclass reads — was not named ``_backends``.
    """

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 40
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
__lowercase = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
__lowercase = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def lowercase ( A_ )-> list[list[int]]:
    """Compute the next Game of Life generation for a grid of 0/1 cells.

    Rules (excerpt from Wikipedia):
    1. Any live cell with two or three live neighbours survives.
    2. Any dead cell with three live neighbours becomes a live cell.
    3. All other cells are dead in the next generation.
    """
    next_generation = []
    for row_idx in range(len(A_)):
        new_row = []
        for col_idx in range(len(A_[row_idx])):
            # Count live neighbours by scanning the 3x3 window around the
            # cell, skipping the cell itself and out-of-range positions.
            live_neighbours = 0
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    if dy == 0 and dx == 0:
                        continue
                    ny = row_idx + dy
                    nx = col_idx + dx
                    if 0 <= ny < len(A_) and 0 <= nx < len(A_[row_idx]):
                        live_neighbours += A_[ny][nx]
            alive = A_[row_idx][col_idx] == 1
            if (alive and 2 <= live_neighbours <= 3) or (not alive and live_neighbours == 3):
                new_row.append(1)
            else:
                new_row.append(0)
        next_generation.append(new_row)
    return next_generation
def lowercase ( cells , frames )-> list[Image.Image]:
    """Render *frames* successive Game of Life generations as PIL images.

    Fixes: both parameters shared one duplicated name (a SyntaxError), the
    greyscale pixel value was bound to a throwaway local instead of being
    written into the image via ``pixels[x, y]``, and the function appended
    its argument instead of the rendered image.
    """
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB" , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image: live cells render black, dead cells white.
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image, then advance the simulation one generation.
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    # Render 16 glider generations and write them as an animated GIF.
    # NOTE(review): `generate_images` and `GLIDER` are not defined above
    # (the function/constants are bound to `lowercase`/`__lowercase`), and
    # the result is bound to `__lowercase` yet consumed as `images` —
    # confirm the intended bindings.
    __lowercase = generate_images(GLIDER, 16)
    images[0].save("""out.gif""", save_all=True, append_images=images[1:])
| 40
| 1
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def lowercase ( vl , wt , w , n )-> float:
    """Fractional knapsack: best value achievable with capacity *w*.

    vl: item values, wt: item weights, w: capacity, n: number of items.
    Items are taken greedily by value/weight ratio; the item at the
    capacity boundary is taken fractionally.

    Fixes: all four parameters shared one duplicated name (a SyntaxError),
    the sort key lambda declared ``A_`` but read ``x``, and ``reverse`` was
    bound to an undefined value instead of True.
    """
    by_ratio = sorted(zip(vl , wt ) , key=lambda x : x[0] / x[1] , reverse=True )
    vl = [item[0] for item in by_ratio]
    wt = [item[1] for item in by_ratio]
    acc = list(accumulate(wt ) )
    # k = number of whole items that fit inside capacity w.
    k = bisect(acc , w )
    return (
        0
        if k == 0
        else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k] )
    )
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 40
|
"""simple docstring"""
from itertools import permutations
def lowercase ( num )-> bool:
    """Return True if the 0-9 pandigital tuple *num* has the Project Euler 43
    substring-divisibility property (d2d3d4 % 2 == 0, d3d4d5 % 3 == 0,
    d4d5d6 % 5 == 0, then 7/11/13/17 for the remaining windows).

    Fixes: the parameter was named ``A_`` while the body read ``num``, the
    divisor list was bound to a throwaway local, and the loop enumerated
    the digit tuple instead of the divisor list.
    """
    if num[3] % 2 != 0:  # d2d3d4 divisible by 2
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:  # d3d4d5 divisible by 3
        return False
    if num[5] % 5 != 0:  # d4d5d6 divisible by 5
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def lowercase ( n = 10 )-> int:
    """Sum all 0..(n-1) pandigital numbers with the substring-divisibility
    property (Project Euler 43; n defaults to 10 digits).

    Fix: the generator previously built digits with ``map(A_, A_)`` —
    mapping the integer argument over itself — instead of stringifying each
    digit of the permutation with ``map(str, num)``.

    NOTE(review): ``is_substring_divisible`` is consumed here but the helper
    above is named ``lowercase`` — confirm the intended binding.
    """
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined above (the helper is named
    # `lowercase`) — confirm the intended binding.
    print(f'''{solution() = }''')
| 40
| 1
|
"""simple docstring"""
def lowercase ( n , k )-> int:
    """Compute the binomial coefficient C(n, k) iteratively.

    Fix: the two parameters shared one duplicated name (a SyntaxError)
    while the body already read ``n`` and ``k``.
    """
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def lowercase ( node_count )-> int:
    """Return the Catalan number for *node_count*: C(2n, n) // (n + 1).

    Fix: the parameter was named ``A_`` while the body read ``node_count``.
    """
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def lowercase ( n )-> int:
    """Return n! for non-negative *n*; raise ValueError for negative input.

    Fix: the parameter was named ``A_`` while the body read ``n``.
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values" )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def lowercase ( A_ )-> int:
    """Number of binary trees with *A_* nodes: the Catalan number for the
    shapes times A_! for the labelled orderings."""
    shapes = catalan_number(A_ )
    orderings = factorial(A_ )
    return shapes * orderings
if __name__ == "__main__":
    # Read a node count and report tree counts for it.
    # NOTE(review): `binary_tree_count` and `catalan_number` are not defined
    # above (every helper is named `lowercase`) — confirm the bindings.
    __lowercase = int(input("""Enter the number of nodes: """).strip() or 0)
    if node_count <= 0:
        raise ValueError("""We need some nodes to work with.""")
    print(
        f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
        f'''binary trees and {catalan_number(node_count)} binary search trees.'''
    )
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( _a ,unittest.TestCase ):
    """Fast CPU tests for the Kandinsky 2.2 ControlNet pipeline.

    Builds a tiny UNet / VQ-VAE so a full denoising run is cheap, then
    compares a 3x3 corner slice of the output against frozen values.

    NOTE(review): the class attributes all share the scrambled name
    ``UpperCAmelCase`` (later ones overwrite earlier ones); they presumably
    correspond to pipeline_class / params / batch_params / etc. of the
    tester mixin — confirm the intended attribute names.
    """
    UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline
    UpperCAmelCase : List[str] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Optional[Any] = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    UpperCAmelCase : Dict = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    UpperCAmelCase : Optional[int] = False
    @property
    def __snake_case ( self : Optional[Any]):
        # Tiny embedding width used throughout the dummy models.
        return 32
    @property
    def __snake_case ( self : Dict):
        return 32
    @property
    def __snake_case ( self : Dict):
        return self.time_input_dim
    @property
    def __snake_case ( self : Any):
        return self.time_input_dim * 4
    @property
    def __snake_case ( self : str):
        return 100
    @property
    def __snake_case ( self : str):
        # Build a minimal UNet with image+hint conditioning.
        torch.manual_seed(0)
        a : str = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        a : Dict = UNetaDConditionModel(**__UpperCAmelCase)
        return model
    @property
    def __snake_case ( self : str):
        # Keyword arguments for the tiny VQ-VAE decoder ("movq").
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def __snake_case ( self : Union[str, Any]):
        torch.manual_seed(0)
        a : Dict = VQModel(**self.dummy_movq_kwargs)
        return model
    def __snake_case ( self : Optional[Any]):
        # Assemble unet + scheduler + movq into the component dict the
        # pipeline constructor expects.
        a : Optional[Any] = self.dummy_unet
        a : int = self.dummy_movq
        a : str = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=__UpperCAmelCase , )
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def __snake_case ( self : Optional[int] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : int=0):
        # Seeded random embeddings + hint image so the run is reproducible.
        a : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1)).to(
            __UpperCAmelCase)
        # create hint
        a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        # mps has no per-device generator; fall back to the global seed.
        if str(__UpperCAmelCase).startswith("mps"):
            a : Union[str, Any] = torch.manual_seed(__UpperCAmelCase)
        else:
            a : List[Any] = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : str = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def __snake_case ( self : Dict):
        # End-to-end smoke test: two inference steps on CPU, then compare a
        # frozen 3x3x3 slice of the output image (and the tuple-return path).
        a : str = "cpu"
        a : Tuple = self.get_dummy_components()
        a : Dict = self.pipeline_class(**__UpperCAmelCase)
        a : Optional[int] = pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Optional[Any] = pipe(**self.get_dummy_inputs(__UpperCAmelCase))
        a : Any = output.images
        a : Any = pipe(
            **self.get_dummy_inputs(__UpperCAmelCase) , return_dict=__UpperCAmelCase , )[0]
        a : Union[str, Any] = image[0, -3:, -3:, -1]
        a : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : Tuple = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow GPU integration test: full prior + controlnet-depth run compared
    against a reference image by mean pixel difference."""
    def __snake_case ( self : Optional[int]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __snake_case ( self : List[str]):
        # Reference output and depth-hint input fetched from the hub.
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy")
        a : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png")
        # Normalise the hint to [0, 1] CHW with a batch dimension.
        a : List[Any] = torch.from_numpy(np.array(__UpperCAmelCase)).float() / 255.0
        a : str = hint.permute(2 , 0 , 1).unsqueeze(0)
        a : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa)
        pipe_prior.to(__UpperCAmelCase)
        a : List[str] = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa)
        a : int = pipeline.to(__UpperCAmelCase)
        pipeline.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Tuple = "A robot, 4k photo"
        a : Any = torch.Generator(device="cuda").manual_seed(0)
        # NOTE(review): the tuple-unpack targets here are scrambled
        # (`a , a = ...`); presumably image_embeds / negative embeds from the
        # prior — confirm the intended bindings.
        a , a : int = pipe_prior(
            __UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
        a : str = torch.Generator(device="cuda").manual_seed(0)
        a : Union[str, Any] = pipeline(
            image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , hint=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , output_type="np" , )
        a : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase)
| 40
| 1
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
# Browser-like User-Agent so Google serves the full image-search markup.
# Fix: this dict was bound to `__lowercase` but is consumed as `headers`.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def lowercase ( query = "dhaka" , max_images = 5 )-> int:
    """Download up to *max_images* Google Image results for *query* into a
    ``query_<query>`` directory; return the number of images fetched.

    Fixes: the two parameters shared one duplicated name (a SyntaxError),
    ``requests.get`` was called with the wrong params/headers objects, and
    the download opener's request headers were bound to a throwaway local
    instead of ``opener.addheaders``.
    """
    max_images = min(max_images , 50 )  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search" , params=params , headers=headers )
    soup = BeautifulSoup(html.text , "html.parser" )
    # Google embeds the result metadata in AF_initDataCallback script blobs.
    matched_images_data = "".join(
        re.findall(R"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
    matched_images_data_fix = json.dumps(matched_images_data )
    matched_images_data_json = json.loads(matched_images_data_fix )
    matched_google_image_data = re.findall(
        R"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , matched_images_data_json , )
    if not matched_google_image_data:
        return 0
    # Drop the low-resolution thumbnail entries before extracting URLs.
    removed_matched_google_images_thumbnails = re.sub(
        R"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(matched_google_image_data ) , )
    matched_google_full_resolution_images = re.findall(
        R"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , removed_matched_google_images_thumbnails , )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images ):
        if index >= max_images:
            return index
        # The URLs are double-escaped in the page source; decode twice.
        original_size_img_not_fixed = bytes(fixed_full_res_image , "ascii" ).decode(
            "unicode-escape" )
        original_size_img = bytes(original_size_img_not_fixed , "ascii" ).decode(
            "unicode-escape" )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener )
        path_name = F'''query_{query.replace(" " , "_" )}'''
        if not os.path.exists(path_name ):
            os.makedirs(path_name )
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img , F'''{path_name}/original_size_img_{index}.jpg''' )
    return index
if __name__ == "__main__":
    # NOTE(review): `download_images_from_google_query` is not defined above
    # (the helper is named `lowercase`) — confirm the intended binding.
    try:
        __lowercase = download_images_from_google_query(sys.argv[1])
        print(f'''{image_count} images were downloaded to disk.''')
    except IndexError:
        print("""Please provide a search term.""")
        raise
| 40
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowercase ( model_name )-> Dict:
    """Build the ASTConfig matching an original AST checkpoint name.

    Fixes: the parameter was named ``A_`` while the body read ``model_name``;
    the per-variant settings, label files and id2label/label2id mappings
    were bound to throwaway locals instead of the config; and the id2label
    comprehension cast the wrong value to int. NOTE(review): the
    stride/max_length attribute names are reconstructed from the upstream
    conversion script — confirm against it.
    """
    config = ASTConfig()
    if "10-10" in model_name:
        # Defaults already use frequency/time stride 10.
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported" )
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def lowercase ( name )-> str:
    """Map an original AST checkpoint parameter name to the HF naming scheme.

    Fix: the parameter was named ``A_`` while every check and replacement
    read ``name``.
    """
    if "module.v" in name:
        name = name.replace("module.v" , "audio_spectrogram_transformer" )
    if "cls_token" in name:
        name = name.replace("cls_token" , "embeddings.cls_token" )
    if "dist_token" in name:
        name = name.replace("dist_token" , "embeddings.distillation_token" )
    if "pos_embed" in name:
        name = name.replace("pos_embed" , "embeddings.position_embeddings" )
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks" , "encoder.layer" )
    if "attn.proj" in name:
        name = name.replace("attn.proj" , "attention.output.dense" )
    if "attn" in name:
        name = name.replace("attn" , "attention.self" )
    if "norm1" in name:
        name = name.replace("norm1" , "layernorm_before" )
    if "norm2" in name:
        name = name.replace("norm2" , "layernorm_after" )
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1" , "intermediate.dense" )
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2" , "output.dense" )
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0" , "classifier.layernorm" )
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1" , "classifier.dense" )
    return name
def lowercase ( orig_state_dict , config )-> dict:
    """Split fused qkv projections of an original AST state dict into the
    separate query/key/value tensors the HF model expects.

    Bugs fixed: the signature used the duplicate parameter name ``A_`` twice
    (a SyntaxError), and every popped value was assigned to a throwaway local
    instead of being written back, so the function returned an emptied dict.

    :param orig_state_dict: checkpoint state dict; mutated in place.
    :param config: model config; only ``config.hidden_size`` is read.
    :return: the (mutated) state dict.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            # Layer index sits at position 3 of e.g. "module.v.blocks.<i>.qkv.weight".
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                # Fused tensor is [q; k; v] stacked along dim 0.
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            # NOTE(review): upstream also renames non-qkv keys via the rename
            # helper; here the key is kept as-is to stay conservative — confirm.
            orig_state_dict[key] = val
    return orig_state_dict
def lowercase ( A_ )-> None:
    """Drop the original classification-head keys from a state dict in place.

    Bugs fixed: the loop referenced the undefined name ``state_dict`` (the
    parameter is ``A_``) and called ``pop(A_, A_)`` — popping the dict itself
    as a key — instead of ``pop(k, None)``.

    :param A_: checkpoint state dict; mutated in place. Missing keys are
        tolerated (``pop`` with a default), so the call is idempotent.
    """
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        A_.pop(k, None)
@torch.no_grad()
def lowercase ( A_ , A_ , A_=False )-> Optional[int]:
    # NOTE(review): duplicate `A_` parameter names are a SyntaxError in Python;
    # the body reads `model_name`, `pytorch_dump_folder_path` and `push_to_hub`,
    # so the intended signature is presumably
    # (model_name, pytorch_dump_folder_path, push_to_hub=False) — confirm.
    '''Convert an original AST checkpoint to HF format: download weights,
    remap the state dict, sanity-check logits on a sample input, then
    optionally save locally and/or push model + feature extractor to the hub.'''
    a : Optional[int] = get_audio_spectrogram_transformer_config(A_ )
    # Supported model names mapped to the Dropbox URLs of the original weights.
    a : Dict = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }
    # load original state_dict
    # NOTE(review): `model_name_to_url` (and `model`, `outputs`, etc. below)
    # are never bound — all assignments go to the throwaway local `a`.
    # Looks like mechanical renaming damage; verify against the upstream script.
    a : Any = model_name_to_url[model_name]
    a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )
    # remove some keys
    remove_keys(A_ )
    # rename some keys
    a : Union[str, Any] = convert_state_dict(A_ , A_ )
    # load 🤗 model
    a : List[str] = ASTForAudioClassification(A_ )
    model.eval()
    model.load_state_dict(A_ )
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    # Normalisation stats and max input length differ for speech-commands models.
    a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
    a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
    a : str = 1_024 if "speech-commands" not in model_name else 128
    a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ )
    if "speech-commands" in model_name:
        a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
        a : int = dataset[0]["audio"]["array"]
    else:
        a : Tuple = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
        a , a : Tuple = torchaudio.load(A_ )
        a : Optional[Any] = waveform.squeeze().numpy()
    a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" )
    # forward pass
    a : Optional[Any] = model(**A_ )
    a : List[str] = outputs.logits
    # Expected logit slices per checkpoint, used as a conversion sanity check.
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
    elif model_name == "ast-finetuned-speech-commands-v2":
        a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
    else:
        raise ValueError("Unknown model name" )
    if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ):
        raise ValueError("Logits don't match" )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        Path(A_ ).mkdir(exist_ok=A_ )
        print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(A_ )
        print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
        feature_extractor.save_pretrained(A_ )
    if push_to_hub:
        print("Pushing model and feature extractor to the hub..." )
        model.push_to_hub(F'''MIT/{model_name}''' )
        feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser is bound to `__lowercase` but used as `parser`,
    # and `parse_args()` is bound to `__lowercase` but read as `args`; also
    # `convert_audio_spectrogram_transformer_checkpoint` is not defined under
    # that name in this file — looks like renaming damage, confirm upstream.
    __lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""ast-finetuned-audioset-10-10-0.4593""",
        type=str,
        help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    __lowercase = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 40
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
# Map of pretrained AST checkpoint ids to their hosted config files.
# NOTE(review): this reuses (and clobbers) the `__lowercase` name bound to the
# logger above — presumably two distinct module constants upstream; confirm.
__lowercase = {
    """MIT/ast-finetuned-audioset-10-10-0.4593""": (
        """https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
    ),
}
class _A ( _a ):
    """Configuration class for the Audio Spectrogram Transformer (AST) model.

    Bugs fixed: every ``__init__`` parameter was named ``__UpperCAmelCase``
    (duplicate parameter names are a SyntaxError), and each value was assigned
    to the throwaway local ``a`` instead of being stored on ``self``, so the
    configuration carried no attributes. Parameter names/defaults are restored
    from the original assignment order (hidden_size=768, ..., num_mel_bins=128).
    """

    # Model-type identifier used for config lookup.
    UpperCAmelCase : List[str] = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 40
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the RAG subpackage: always-available submodules
# plus framework-conditional model modules.
# Bug fixed: the import map and both conditional name lists were all assigned
# to `__lowercase`, clobbering each other, while the `_LazyModule` call below
# referenced the never-defined `_import_structure` (NameError at import time).
_import_structure = {
    "configuration_rag": ["RagConfig"],
    "retrieval_rag": ["RagRetriever"],
    "tokenization_rag": ["RagTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply omit the PyTorch model module.
    pass
else:
    _import_structure["modeling_rag"] = [
        "RagModel",
        "RagPreTrainedModel",
        "RagSequenceForGeneration",
        "RagTokenForGeneration",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # TensorFlow missing: omit the TF model module.
    pass
else:
    _import_structure["modeling_tf_rag"] = [
        "TFRagModel",
        "TFRagPreTrainedModel",
        "TFRagSequenceForGeneration",
        "TFRagTokenForGeneration",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # NOTE(review): upstream assigns the lazy module into sys.modules[__name__];
    # the `__lowercase` binding is kept here to preserve this file's interface.
    __lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowercase = 250004
__lowercase = 250020
@require_sentencepiece
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """MBart slow/fast tokenizer tests driven by the local SentencePiece fixture.

    NOTE(review): several methods read names (`tokenizer`, `__UpperCAmelCase`,
    `tokenizer_r_files`, ...) that are never bound — assignments all go to the
    throwaway local `a`. Looks like mechanical renaming damage; verify against
    the upstream test file before relying on these tests.
    """

    # NOTE(review): four class attributes all named `UpperCAmelCase` — each
    # rebinding clobbers the previous; upstream these are tokenizer_class,
    # rust_tokenizer_class, test_rust_tokenizer, test_sentencepiece. Confirm.
    UpperCAmelCase : Optional[Any] = MBartTokenizer
    UpperCAmelCase : Dict = MBartTokenizerFast
    UpperCAmelCase : Optional[Any] = True
    UpperCAmelCase : str = True

    def __snake_case ( self : List[Any]):
        # Build a tokenizer from the fixture and persist it so the mixin can
        # reload it per-test from self.tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        a : Any = MBartTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
        tokenizer.save_pretrained(self.tmpdirname)

    def __snake_case ( self : List[Any]):
        # End-to-end check of tokenize / convert_tokens_to_ids /
        # convert_ids_to_tokens, including <unk> round-tripping.
        a : str = MBartTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
        a : Union[str, Any] = tokenizer.tokenize("This is a test")
        self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        a : int = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            __UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        a : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
        self.assertListEqual(
            __UpperCAmelCase , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )
        a : Tuple = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
        self.assertListEqual(
            __UpperCAmelCase , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    def __snake_case ( self : Any):
        # Verify that slow and fast tokenizers serialize compatibly in all
        # three save modes (default, legacy_format=True, legacy_format=False).
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        a : Optional[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                a : Dict = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase)
                a : List[Any] = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase)
                a : Optional[int] = tempfile.mkdtemp()
                a : Optional[int] = tokenizer_r.save_pretrained(__UpperCAmelCase)
                a : List[Any] = tokenizer_p.save_pretrained(__UpperCAmelCase)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                a : Optional[int] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase)
                # Checks everything loads correctly in the same way
                a : Dict = tokenizer_r.from_pretrained(__UpperCAmelCase)
                a : int = tokenizer_p.from_pretrained(__UpperCAmelCase)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(__UpperCAmelCase)
                # Save tokenizer rust, legacy_format=True
                a : Optional[Any] = tempfile.mkdtemp()
                a : Tuple = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase)
                a : List[str] = tokenizer_p.save_pretrained(__UpperCAmelCase)
                # Checks it save with the same files
                self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase)
                # Checks everything loads correctly in the same way
                a : Dict = tokenizer_r.from_pretrained(__UpperCAmelCase)
                a : Optional[Any] = tokenizer_p.from_pretrained(__UpperCAmelCase)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
                shutil.rmtree(__UpperCAmelCase)
                # Save tokenizer rust, legacy_format=False
                a : Dict = tempfile.mkdtemp()
                a : List[str] = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase)
                a : Optional[int] = tokenizer_p.save_pretrained(__UpperCAmelCase)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                a : Any = tokenizer_r.from_pretrained(__UpperCAmelCase)
                a : Any = tokenizer_p.from_pretrained(__UpperCAmelCase)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
                shutil.rmtree(__UpperCAmelCase)
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
    """Integration tests against the real pretrained facebook/mbart-large-en-ro
    tokenizer (language codes, truncation, seq2seq batching).

    NOTE(review): as elsewhere in this file, assignments go to the throwaway
    local `a` while later lines read the upstream names (`self.tokenizer`,
    `batch`, `targets`, ...) — verify against the upstream test file.
    """

    # NOTE(review): three attributes all named `UpperCAmelCase` — upstream
    # these are checkpoint_name, src_text, tgt_text, expected_src_tokens.
    UpperCAmelCase : List[str] = """facebook/mbart-large-en-ro"""
    UpperCAmelCase : Optional[Any] = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    UpperCAmelCase : str = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    UpperCAmelCase : str = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]

    @classmethod
    def __snake_case ( cls : str):
        # Load the pretrained tokenizer once for the whole class (setUpClass).
        a : MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO")
        a : Optional[int] = 1
        return cls

    def __snake_case ( self : Optional[Any]):
        # Language-code ids are appended after the base fairseq vocab.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020)

    def __snake_case ( self : Union[str, Any]):
        # Encoding the first source sentence must reproduce the golden ids.
        a : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase)

    def __snake_case ( self : int):
        # Decoding with skip_special_tokens must drop the language code and EOS.
        self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids)
        a : Tuple = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        a : List[Any] = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
        a : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase)
        self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
        self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase)

    def __snake_case ( self : List[str]):
        # Truncation keeps EOS + language code as the final two tokens.
        a : List[Any] = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , __UpperCAmelCase)
        a : Tuple = 10
        a : Dict = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase).input_ids[0]
        self.assertEqual(ids[-2] , 2)
        self.assertEqual(ids[-1] , __UpperCAmelCase)
        self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)

    def __snake_case ( self : List[str]):
        # <mask> and ar_AR sit at fixed positions in the extended vocab.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001])

    def __snake_case ( self : str):
        # The fairseq token table must survive a save/load round-trip.
        a : List[str] = tempfile.mkdtemp()
        a : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__UpperCAmelCase)
        a : Dict = MBartTokenizer.from_pretrained(__UpperCAmelCase)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase)

    @require_torch
    def __snake_case ( self : Dict):
        # Seq2seq batch layout matches the fairseq reference.
        a : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="pt")
        a : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def __snake_case ( self : List[Any]):
        # Full padded/truncated batch: shapes, golden ids and special-token reset.
        a : List[Any] = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
        a : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
        self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase)
        self.assertEqual((2, 14) , batch.input_ids.shape)
        self.assertEqual((2, 14) , batch.attention_mask.shape)
        a : Tuple = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase)
        self.assertEqual(2 , batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [])
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])

    def __snake_case ( self : Optional[int]):
        # Source and target may be truncated to different max lengths.
        a : List[str] = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="pt")
        a : List[str] = self.tokenizer(
            text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="pt")
        a : str = targets["input_ids"]
        a : Optional[Any] = shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1] , 3)
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10)

    @require_torch
    def __snake_case ( self : Any):
        # Translation inputs carry forced_bos_token_id for the target language.
        a : Tuple = self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(__UpperCAmelCase) , {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            } , )
| 40
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( _a ,_a ,_a ,unittest.TestCase ):
    """Fast CPU unit tests for StableDiffusionInpaintPipeline built from tiny
    randomly-initialised components.

    NOTE(review): assignments go to the throwaway local `a` while later lines
    read the upstream variable names (`unet`, `scheduler`, `sd_pipe`, ...) —
    mechanical renaming damage; verify against the upstream test file.
    """

    # NOTE(review): attributes all named `UpperCAmelCase` — upstream these are
    # pipeline_class, params, batch_params, image_params, image_latents_params.
    UpperCAmelCase : str = StableDiffusionInpaintPipeline
    UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    UpperCAmelCase : Union[str, Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCAmelCase : int = frozenset([] )

    def __snake_case ( self : Dict):
        # Build the tiny 9-channel-input UNet, PNDM scheduler, VAE, CLIP text
        # encoder and tokenizer that make up the dummy inpainting pipeline.
        torch.manual_seed(0)
        a : List[Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
        a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase)
        torch.manual_seed(0)
        a : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0)
        a : List[str] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        a : Any = CLIPTextModel(__UpperCAmelCase)
        a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        a : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
        a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0]
        a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64))
        a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
        if str(__UpperCAmelCase).startswith("mps"):
            # mps does not support device-bound generators.
            a : Tuple = torch.manual_seed(__UpperCAmelCase)
        else:
            a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
        a : Optional[Any] = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def __snake_case ( self : List[str]):
        # Two-step inference on CPU must reproduce the golden 3x3 output slice.
        a : Union[str, Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        a : Tuple = self.get_dummy_components()
        a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase)
        a : int = sd_pipe.to(__UpperCAmelCase)
        sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        a : Any = self.get_dummy_inputs(__UpperCAmelCase)
        a : Optional[int] = sd_pipe(**__UpperCAmelCase).images
        a : Optional[int] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def __snake_case ( self : str):
        # Batched vs single inference must agree within tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
    """Slow GPU integration tests for the pretrained
    stabilityai/stable-diffusion-2-inpainting pipeline (fp32, fp16, and
    memory-constrained cpu-offload runs against golden reference images).

    NOTE(review): assignments go to the throwaway local `a` while later lines
    read the upstream names (`pipe`, `output`, `expected_image`, ...) —
    mechanical renaming damage; verify against the upstream test file.
    """

    def __snake_case ( self : Union[str, Any]):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __snake_case ( self : Dict):
        # fp32 run compared against the stored golden numpy image.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        a : Tuple = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(__UpperCAmelCase , safety_checker=__UpperCAmelCase)
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Any = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : str = torch.manual_seed(0)
        a : Union[str, Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : List[str] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def __snake_case ( self : Any):
        # fp16 run; tolerance is looser than the fp32 test above.
        a : str = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : str = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Any = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , torch_dtype=torch.floataa , safety_checker=__UpperCAmelCase , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Dict = torch.manual_seed(0)
        a : List[Any] = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , output_type="np" , )
        a : Optional[Any] = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def __snake_case ( self : int):
        # fp16 + attention slicing + sequential CPU offload must stay under
        # the 2.65 GB peak-VRAM budget.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        a : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        a : Optional[int] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        a : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
        a : Optional[int] = PNDMScheduler.from_pretrained(__UpperCAmelCase , subfolder="scheduler")
        a : int = StableDiffusionInpaintPipeline.from_pretrained(
            __UpperCAmelCase , safety_checker=__UpperCAmelCase , scheduler=__UpperCAmelCase , torch_dtype=torch.floataa , )
        pipe.to(__UpperCAmelCase)
        pipe.set_progress_bar_config(disable=__UpperCAmelCase)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        a : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
        a : Optional[int] = torch.manual_seed(0)
        a : str = pipe(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , mask_image=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2 , output_type="np" , )
        a : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 40
| 1
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class _A :
"""simple docstring"""
def __init__( self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any=13 , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : Optional[int]=True , __UpperCAmelCase : int=True , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=True , __UpperCAmelCase : int=99 , __UpperCAmelCase : Dict=32 , __UpperCAmelCase : Union[str, Any]=5 , __UpperCAmelCase : str=4 , __UpperCAmelCase : Optional[Any]=4 , __UpperCAmelCase : Dict="gelu" , __UpperCAmelCase : Union[str, Any]=0.0 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : int=True , __UpperCAmelCase : str=512 , __UpperCAmelCase : Any=16 , __UpperCAmelCase : Optional[Any]=2 , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : List[Any]=3 , __UpperCAmelCase : Union[str, Any]=4 , __UpperCAmelCase : List[str]=None , ):
a : str = parent
a : Union[str, Any] = batch_size
a : Optional[Any] = seq_length
a : int = is_training
a : int = use_input_mask
a : Optional[Any] = use_token_type_ids
a : Any = use_labels
a : Optional[int] = vocab_size
a : Optional[int] = hidden_size
a : Any = num_hidden_layers
a : str = num_attention_heads
a : int = intermediate_multiple_size
a : Any = hidden_act
a : Union[str, Any] = hidden_dropout
a : int = attention_dropout
a : str = weight_tying
a : Optional[int] = max_position_embeddings
a : Optional[int] = type_vocab_size
a : int = type_sequence_label_size
a : Optional[int] = initializer_range
a : str = num_labels
a : List[Any] = num_choices
a : Any = scope
def __snake_case ( self : List[str]):
a : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Union[str, Any] = None
if self.use_input_mask:
a : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Union[str, Any] = None
if self.use_labels:
a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Any = self.get_config()
return config, input_ids, input_mask, token_labels
def __snake_case ( self : int):
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __snake_case ( self : Union[str, Any]):
a , a , a , a : Tuple = self.prepare_config_and_inputs()
a : str = True
return config, input_ids, input_mask, token_labels
def __snake_case ( self : Any , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[str]):
a : str = GPTNeoXJapaneseModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : str = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
a : Any = model(__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : str):
a : Optional[Any] = True
a : Dict = GPTNeoXJapaneseModel(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int]):
a : List[Any] = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    # NOTE(review): duplicate ``__UpperCAmelCase`` parameter names (SyntaxError)
    # and many unbound locals (``model``, ``outputs``, ``config``,
    # ``input_ids``, ``input_mask``, ``next_tokens``, ``next_mask``,
    # ``output_from_no_past``, ``output_from_past``, ``random_slice_idx``,
    # ``output_from_past_slice``) -- a mangled rename of the standard
    # "decoder past key values" regression check.
    def __snake_case ( self : List[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Union[str, Any]):
        """Check that generating with cached past_key_values matches the
        no-cache forward pass on the overlapping token positions."""
        a : List[Any] = True
        a : Any = GPTNeoXJapaneseForCausalLM(config=__UpperCAmelCase)
        model.to(__UpperCAmelCase)
        model.eval()
        # first forward pass
        a : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , use_cache=__UpperCAmelCase)
        a : Optional[Any] = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        a : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size)
        a : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and
        a : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1)
        a : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1)
        a : Optional[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase)
        a : List[Any] = output_from_no_past["hidden_states"][0]
        a : Tuple = model(
            __UpperCAmelCase , attention_mask=__UpperCAmelCase , past_key_values=__UpperCAmelCase , output_hidden_states=__UpperCAmelCase , )["hidden_states"][0]
        # select random slice
        a : List[Any] = ids_tensor((1,) , output_from_past.shape[-1]).item()
        a : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
        a : List[Any] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3))
    def __snake_case ( self : Optional[int]):
        """Adapt prepare_config_and_inputs to the (config, inputs_dict) shape
        expected by the common model tests.

        NOTE(review): ``config_and_inputs``/``config``/``input_ids``/
        ``input_mask`` are unbound here (results were assigned to ``a``) --
        mangled rename; verify against upstream.
        """
        a : Tuple = self.prepare_config_and_inputs()
        a , a , a , a : Any = config_and_inputs
        a : int = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class _A ( _a ,_a ,unittest.TestCase ):
    """Test suite for GPT-NeoX-Japanese (base model + causal-LM head).

    NOTE(review): class-attribute names were mangled to ``UpperCAmelCase``
    (upstream: all_model_classes, all_generative_model_classes,
    pipeline_model_mapping, and four boolean feature flags), and several call
    sites reference the unbound name ``__UpperCAmelCase`` -- verify against
    the upstream transformers test file.
    """

    UpperCAmelCase : int = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    UpperCAmelCase : List[Any] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    UpperCAmelCase : List[Any] = (
        {"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    UpperCAmelCase : str = False
    UpperCAmelCase : Optional[int] = False
    UpperCAmelCase : Optional[int] = False
    UpperCAmelCase : Dict = False

    def __snake_case ( self : Any):
        # Shared model tester plus the common-config test harness.
        a : Optional[int] = GPTNeoXJapaneseModelTester(self)
        a : Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37)

    def __snake_case ( self : List[str]):
        self.config_tester.run_common_tests()

    def __snake_case ( self : Tuple):
        a , a , a , a : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)

    def __snake_case ( self : Dict):
        a , a , a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)

    def __snake_case ( self : Optional[int]):
        # This regression test was failing with PyTorch < 1.3
        a , a , a , a : int = self.model_tester.prepare_config_and_inputs_for_decoder()
        a : Union[str, Any] = None
        self.model_tester.create_and_check_model_as_decoder(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)

    def __snake_case ( self : str):
        a , a , a , a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)

    def __snake_case ( self : Optional[int]):
        a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*__UpperCAmelCase)

    @slow
    def __snake_case ( self : List[str]):
        """Integration test: greedy generation on five Japanese prompts must
        reproduce the reference completions exactly."""
        a : Optional[int] = "abeja/gpt-neox-japanese-2.7b"
        a : int = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        a : Union[str, Any] = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        a : Tuple = GPTNeoXJapaneseTokenizer.from_pretrained(__UpperCAmelCase)
        a : List[Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(__UpperCAmelCase)
        a : int = []
        for prompt in prompts:
            a : List[str] = tokenizer(__UpperCAmelCase , return_tensors="pt").input_ids
            a : List[str] = model.generate(__UpperCAmelCase , max_length=50)
            a : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
            predicted_outputs += generated_string
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase)
| 40
|
"""simple docstring"""
def lowercase ( string )-> bool:
    """Return True if *string* is an isogram (no repeated letters, case-insensitive).

    Args:
        string: the word to check; must be purely alphabetic.

    Returns:
        True when every letter occurs at most once, ignoring case.

    Raises:
        ValueError: if the string contains a non-alphabetic character.
    """
    # Fix: the original body referenced the unbound name ``string`` while the
    # parameter was called ``A_`` -- every call raised NameError. Naming the
    # parameter ``string`` restores the intended behaviour.
    if not all(x.isalpha() for x in string ):
        raise ValueError("String must only contain alphabetic characters." )
    # Sorting is unnecessary for a duplicate check: comparing the number of
    # distinct lower-cased letters with the total length is O(n).
    lowered = string.lower()
    return len(lowered ) == len(set(lowered ) )


# Backward-compatible alias: the __main__ block below calls ``is_isogram``.
is_isogram = lowercase
if __name__ == "__main__":
    # NOTE(review): mangled script body -- both results are bound to
    # ``__lowercase`` while the later lines read ``input_str``/``isogram``,
    # and ``is_isogram`` must be defined for the call to resolve; verify
    # against the original script.
    __lowercase = input("""Enter a string """).strip()
    __lowercase = is_isogram(input_str)
    print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
| 40
| 1
|
"""simple docstring"""
def lowercase ( number , shift_amount )-> str:
    """Logically left-shift *number* by *shift_amount* bits.

    Args:
        number: non-negative integer to shift.
        shift_amount: non-negative number of bit positions.

    Returns:
        The shifted value as a binary string with the ``0b`` prefix,
        e.g. ``lowercase(1, 1) == "0b10"``.

    Raises:
        ValueError: if either argument is negative.
    """
    # Fix: the original signature declared two parameters both named ``A_``
    # (a duplicate-argument SyntaxError) while the body read the unbound names
    # ``number`` and ``shift_amount``; restoring the real parameter names makes
    # the function definable and callable. The redundant str() around bin()
    # is also dropped -- bin() already returns a str.
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )
    binary_number = bin(number )
    # Appending zeros to the binary string performs the logical shift.
    binary_number += "0" * shift_amount
    return binary_number
def lowercase ( number , shift_amount )-> str:
    """Logically right-shift *number* by *shift_amount* bits (zero fill).

    Args:
        number: non-negative integer to shift.
        shift_amount: non-negative number of bit positions.

    Returns:
        The shifted value as a binary string with the ``0b`` prefix;
        ``"0b0"`` when the shift consumes every bit.

    Raises:
        ValueError: if either argument is negative.
    """
    # Fix: the original declared two parameters both named ``A_`` (duplicate
    # argument SyntaxError) while the body read the unbound ``number``/
    # ``shift_amount``; real parameter names restore the intended behaviour.
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers" )
    binary_number = bin(number )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    # Dropping the low-order digits is the logical right shift.
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def lowercase ( number , shift_amount )-> str:
    """Arithmetic right-shift *number* by *shift_amount* bits (sign fill).

    The number is rendered in two's complement with one explicit sign bit
    ("0" for non-negative, "1" for negative); the result keeps the same
    width and carries the ``0b`` prefix, e.g. ``lowercase(-17, 2) == "0b111011"``.

    Args:
        number: integer (may be negative) to shift.
        shift_amount: non-negative number of bit positions.

    Returns:
        Binary string of the shifted value, sign-extended on the left.
    """
    # Fix: the original declared two parameters both named ``A_`` (duplicate
    # argument SyntaxError) while the body read the unbound ``number``/
    # ``shift_amount``. The redundant str(...).strip("-") on the non-negative
    # branch is dropped: bin(number) has no "-" when number >= 0.
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + bin(number )[2:]
    else:  # Get binary (2's complement) representation of negative number
        # Width of the magnitude, then two's complement within that width.
        binary_number_length = len(bin(number )[3:] )
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        # Shifting past the width leaves only copies of the sign bit.
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 40
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
# Module-level logger for the parquet builder (datasets' logging wrapper).
__lowercase = datasets.utils.logging.get_logger(__name__)
@dataclass
class _A ( datasets.BuilderConfig ):
    """BuilderConfig for the Parquet loader.

    NOTE(review): all three fields were mangled to the same name
    ``UpperCAmelCase`` (only the last annotation survives in a dataclass);
    upstream these are batch_size, columns and features -- verify.
    """

    # read batch size / optional column subset / optional explicit features
    UpperCAmelCase : int = 1_0_0_0_0
    UpperCAmelCase : Optional[List[str]] = None
    UpperCAmelCase : Optional[datasets.Features] = None
class _A ( datasets.ArrowBasedBuilder ):
    """Arrow-based dataset builder that reads Parquet files.

    NOTE(review): mangled source -- several names used below
    (``dl_manager``, ``data_files``, ``files``, ``splits``, ``schema``,
    ``parquet_file``) are read but never bound because the assignments all
    target ``a``; verify against datasets' upstream parquet module.
    """

    UpperCAmelCase : str = ParquetConfig

    def __snake_case ( self : Tuple):
        # DatasetInfo carries only the (optional) user-provided features.
        return datasets.DatasetInfo(features=self.config.features)

    def __snake_case ( self : List[Any] , __UpperCAmelCase : str):
        """Resolve config.data_files into SplitGenerators, inferring features
        from the first file's Arrow schema when none were provided."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        a : str = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(__UpperCAmelCase , (str, list, tuple)):
            a : Dict = data_files
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : List[Any] = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files})]
        a : Dict = []
        for split_name, files in data_files.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase):
                a : Optional[int] = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            a : Tuple = [dl_manager.iter_files(__UpperCAmelCase) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(__UpperCAmelCase):
                    with open(__UpperCAmelCase , "rb") as f:
                        a : Tuple = datasets.Features.from_arrow_schema(pq.read_schema(__UpperCAmelCase))
                    break
            splits.append(datasets.SplitGenerator(name=__UpperCAmelCase , gen_kwargs={"files": files}))
        return splits

    def __snake_case ( self : List[str] , __UpperCAmelCase : pa.Table):
        """Cast a table to the declared features' schema when one exists."""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            a : Optional[int] = table_cast(__UpperCAmelCase , self.info.features.arrow_schema)
        return pa_table

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int):
        """Yield (key, pa.Table) record batches from each parquet file."""
        a : Tuple = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(__UpperCAmelCase)):
            with open(__UpperCAmelCase , "rb") as f:
                a : Tuple = pq.ParquetFile(__UpperCAmelCase)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns)):
                        a : Optional[Any] = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(__UpperCAmelCase)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(__UpperCAmelCase)}: {e}''')
                    raise
| 40
| 1
|
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
# Packages whose installed versions are validated when transformers is
# imported (usually the ones in setup.py's install_requires).
# NOTE(review): the list is bound to ``__lowercase`` but the loop below reads
# ``pkgs_to_check_at_runtime`` -- mangled rename; verify against upstream.
__lowercase = [
    """python""",
    """tqdm""",
    """regex""",
    """requests""",
    """packaging""",
    """filelock""",
    """numpy""",
    """tokenizers""",
    """huggingface-hub""",
    """safetensors""",
    """accelerate""",
    """pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available
            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        elif pkg == "accelerate":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_accelerate_available
            # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
            # Transformers with PyTorch
            if not is_accelerate_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowercase ( pkg , hint=None ):
    """Check that the installed version of *pkg* satisfies the pin in `deps`.

    Args:
        pkg: key into the `deps` table from dependency_versions_table.
        hint: optional message appended to the error raised on a mismatch.
    """
    # Fix: the original signature declared two parameters both named ``A_``
    # (duplicate-argument SyntaxError) while the body read the unbound ``pkg``;
    # the dangling ``-> Optional[int]`` annotation (Optional is not imported
    # in this module) is dropped as well.
    require_version(deps[pkg] , hint )
| 40
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger plus the checkpoint-name -> config-URL map for the six
# published DPR checkpoints (context encoder / question encoder / reader,
# each in single-nq and multiset variants).
__lowercase = logging.get_logger(__name__)
__lowercase = {
    """facebook/dpr-ctx_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-single-nq-base""": (
        """https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
    ),
    """facebook/dpr-ctx_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-question_encoder-multiset-base""": (
        """https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
    ),
    """facebook/dpr-reader-multiset-base""": (
        """https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
    ),
}
class _A ( _a ):
    """Configuration for DPR models (mirrors the BERT hyper-parameter set,
    plus `projection_dim` for the optional output projection layer).

    Fix: the original `__init__` declared fifteen parameters all named
    ``__UpperCAmelCase`` (a duplicate-argument SyntaxError) and assigned the
    hyper-parameters to a throwaway local ``a`` instead of ``self``.  The
    parameter names below are reconstructed from the default values, which
    match the published DPRConfig signature.
    """

    # model identifier string ("dpr"); attribute name kept as-is for
    # compatibility with the rest of this (mangled) file.
    UpperCAmelCase : int = """dpr"""

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim: int = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Dimension of the projection on top of the pooled output; 0 disables it.
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 40
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Logger, vocab-file naming, pretrained vocab map, model max length, and the
# sentencepiece word-boundary marker used by the CamemBERT tokenizer.
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """sentencepiece.bpe.model"""}
__lowercase = {
    """vocab_file""": {
        """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
    }
}
__lowercase = {
    """camembert-base""": 512,
}
__lowercase = """▁"""
class _A ( _a ):
    """SentencePiece-based tokenizer for CamemBERT (RoBERTa-style specials).

    NOTE(review): mangled source -- ``__init__`` declares several parameters
    all named ``__UpperCAmelCase`` (duplicate-argument SyntaxError), and a
    number of names read below (``mask_token``, ``sp_model_kwargs``,
    ``vocab_file``, ``vocab``, ``out_string``, ``current_sub_tokens``,
    ``prev_is_special``, ``state``, ``d``, ``out_vocab_file``, ``index``)
    are bound to ``a`` instead; verify against the upstream
    tokenization_camembert module.
    """

    UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
    UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase : Dict = ["""input_ids""", """attention_mask"""]

    def __init__( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : List[str]="</s>" , __UpperCAmelCase : int="</s>" , __UpperCAmelCase : List[Any]="<s>" , __UpperCAmelCase : Optional[int]="<unk>" , __UpperCAmelCase : Any="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Tuple=["<s>NOTUSED", "</s>NOTUSED"] , __UpperCAmelCase : Optional[Dict[str, Any]] = None , **__UpperCAmelCase : List[Any] , ):
        # Mask token behave like a normal word, i.e. include the space before it
        a : Any = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) if isinstance(__UpperCAmelCase , __UpperCAmelCase) else mask_token
        a : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
        a : str = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(__UpperCAmelCase))
        a : List[Any] = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        a : Optional[int] = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        a : int = len(self.fairseq_tokens_to_ids)
        a : Union[str, Any] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        a : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __snake_case ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None):
        """Build model inputs with special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        a : int = [self.cls_token_id]
        a : int = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def __snake_case ( self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False):
        """Return a 0/1 mask marking special-token positions."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase)
        if token_ids_a is None:
            return [1] + ([0] * len(__UpperCAmelCase)) + [1]
        return [1] + ([0] * len(__UpperCAmelCase)) + [1, 1] + ([0] * len(__UpperCAmelCase)) + [1]

    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None):
        """Token type ids are all zero for CamemBERT (RoBERTa convention)."""
        a : List[str] = [self.sep_token_id]
        a : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    @property
    def __snake_case ( self : str):
        # Vocabulary size: fairseq specials plus the sentencepiece model.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def __snake_case ( self : Union[str, Any]):
        """Return the full token -> id vocabulary including added tokens."""
        a : Any = {self.convert_ids_to_tokens(__UpperCAmelCase): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __snake_case ( self : str , __UpperCAmelCase : str):
        # Tokenize with sentencepiece.
        return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase)

    def __snake_case ( self : List[Any] , __UpperCAmelCase : Dict):
        """Map a token string to its id (fairseq specials first)."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(__UpperCAmelCase) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(__UpperCAmelCase)

    def __snake_case ( self : Dict , __UpperCAmelCase : Optional[int]):
        """Map an id back to its token string."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : Tuple):
        """Join tokens into a string, decoding non-special runs with sentencepiece."""
        a : int = []
        a : Dict = ""
        a : List[Any] = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(__UpperCAmelCase) + token
                a : str = True
                a : str = []
            else:
                current_sub_tokens.append(__UpperCAmelCase)
                a : str = False
        out_string += self.sp_model.decode(__UpperCAmelCase)
        return out_string.strip()

    def __getstate__( self : Tuple):
        # Drop the unpicklable SentencePieceProcessor before pickling.
        a : Any = self.__dict__.copy()
        a : Optional[Any] = None
        return state

    def __setstate__( self : Optional[int] , __UpperCAmelCase : List[str]):
        a : Dict = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs"):
            a : Any = {}
        a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def __snake_case ( self : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None):
        """Copy (or serialize) the sentencepiece model into save_directory."""
        if not os.path.isdir(__UpperCAmelCase):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        a : Optional[Any] = os.path.join(
            __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(__UpperCAmelCase) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , __UpperCAmelCase)
        elif not os.path.isfile(self.vocab_file):
            with open(__UpperCAmelCase , "wb") as fi:
                a : List[Any] = self.sp_model.serialized_model_proto()
                fi.write(__UpperCAmelCase)
        return (out_vocab_file,)
| 40
|
"""simple docstring"""
class _A :
    """Max Fenwick (binary indexed) tree: point update, range-max query.

    NOTE(review): mangled source -- ``update``/``query`` declare duplicate
    ``__UpperCAmelCase`` parameter names (a SyntaxError), all methods share the
    name ``__snake_case`` (so ``self.get_prev``/``get_next`` do not resolve),
    and bodies read unbound names (``size``, ``index``, ``value``, ``left``,
    ``right``, ``current_left_border``, ``current_left``, ``result``).
    Upstream this appears to be TheAlgorithms' MaxFenwickTree -- verify.
    """

    def __init__( self : int , __UpperCAmelCase : int):
        # presumably: self.size, self.arr, self.tree -- TODO confirm
        a : Tuple = size
        a : Dict = [0] * size
        a : Optional[int] = [0] * size

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # Next node that covers index: set the lowest unset bit.
        return index | (index + 1)

    @staticmethod
    def __snake_case ( __UpperCAmelCase : int):
        # One past the end of the previous covered block.
        return (index & (index + 1)) - 1

    def __snake_case ( self : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        """Point update: set arr[index] = value and repair maxima upward."""
        a : Union[str, Any] = value
        while index < self.size:
            a : Dict = self.get_prev(__UpperCAmelCase) + 1
            if current_left_border == index:
                a : Optional[int] = value
            else:
                a : Any = max(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
            a : Optional[int] = self.get_next(__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : int):
        """Range-max query over [left, right); ``right`` is exclusive."""
        right -= 1  # `right` is exclusive
        a : List[str] = 0
        while left <= right:
            a : Dict = self.get_prev(__UpperCAmelCase)
            if left <= current_left:
                a : Optional[int] = max(__UpperCAmelCase , self.tree[right])
                a : Optional[Any] = current_left
            else:
                a : List[str] = max(__UpperCAmelCase , self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Logger and the checkpoint-name -> config-URL map for the published
# EnCodec checkpoints (24 kHz and 48 kHz variants).
__lowercase = logging.get_logger(__name__)
__lowercase = {
    """facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
    """facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class _A ( _a ):
    """Configuration for the EnCodec neural audio codec.

    NOTE(review): mangled source -- ``__init__`` declares many parameters all
    named ``__UpperCAmelCase`` (a duplicate-argument SyntaxError) while the
    body reads the original hyper-parameter names, which are therefore
    unbound; the apparent mapping is positional (target_bandwidths,
    sampling_rate, audio_channels, normalize, chunk_length_s, overlap,
    hidden_size, num_filters, num_residual_layers, upsampling_ratios,
    norm_type, kernel_size, last_kernel_size, residual_kernel_size,
    dilation_growth_rate, use_causal_conv, pad_mode, compress,
    num_lstm_layers, trim_right_ratio, codebook_size, codebook_dim,
    use_conv_shortcut). Verify against the upstream EncodecConfig.
    """

    UpperCAmelCase : Optional[Any] = """encodec"""

    def __init__( self : Any , __UpperCAmelCase : List[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , __UpperCAmelCase : str=24000 , __UpperCAmelCase : List[Any]=1 , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : Optional[Any]=128 , __UpperCAmelCase : Any=32 , __UpperCAmelCase : str=1 , __UpperCAmelCase : str=[8, 5, 4, 2] , __UpperCAmelCase : Union[str, Any]="weight_norm" , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : Tuple=3 , __UpperCAmelCase : Optional[int]=2 , __UpperCAmelCase : Any=True , __UpperCAmelCase : str="reflect" , __UpperCAmelCase : Any=2 , __UpperCAmelCase : int=2 , __UpperCAmelCase : str=1.0 , __UpperCAmelCase : str=1024 , __UpperCAmelCase : Any=None , __UpperCAmelCase : Union[str, Any]=True , **__UpperCAmelCase : Dict , ):
        a : Any = target_bandwidths
        a : str = sampling_rate
        a : Union[str, Any] = audio_channels
        a : Optional[int] = normalize
        a : Optional[Any] = chunk_length_s
        a : str = overlap
        a : List[str] = hidden_size
        a : List[str] = num_filters
        a : Union[str, Any] = num_residual_layers
        a : Tuple = upsampling_ratios
        a : Tuple = norm_type
        a : List[str] = kernel_size
        a : Tuple = last_kernel_size
        a : Union[str, Any] = residual_kernel_size
        a : Union[str, Any] = dilation_growth_rate
        a : Any = use_causal_conv
        a : str = pad_mode
        a : Dict = compress
        a : Union[str, Any] = num_lstm_layers
        a : List[Any] = trim_right_ratio
        a : str = codebook_size
        a : Optional[int] = codebook_dim if codebook_dim is not None else hidden_size
        a : int = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''')
        super().__init__(**__UpperCAmelCase)

    @property
    def __snake_case ( self : Union[str, Any]):
        # Chunk length in samples, or None when chunking is disabled.
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def __snake_case ( self : Union[str, Any]):
        # Hop between chunks in samples, derived from the overlap fraction.
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length))

    @property
    def __snake_case ( self : List[str]):
        # Frames per second after the encoder's total downsampling.
        a : Optional[int] = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def __snake_case ( self : List[str]):
        # Number of residual-VQ codebooks needed for the largest bandwidth.
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 40
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class _A ( unittest.TestCase ):
    """Unit tests for the knapsack module.

    NOTE(review): mangled source -- the locals (capacity, weights, values,
    count upstream) are all bound to ``a`` and every ``k.knapsack`` call
    passes the unbound name ``__UpperCAmelCase``; verify against
    TheAlgorithms' knapsack tests.
    """

    def __snake_case ( self : List[Any]):
        # Base cases: zero capacity always yields value 0.
        a : str = 0
        a : Optional[int] = [0]
        a : Union[str, Any] = [0]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)
        a : List[str] = [60]
        a : str = [10]
        a : Optional[int] = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 0)

    def __snake_case ( self : Optional[int]):
        # Easy case: capacity 3, expect best value 5.
        a : Any = 3
        a : str = [1, 2, 3]
        a : Tuple = [3, 2, 1]
        a : Any = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 5)

    def __snake_case ( self : Tuple):
        # Classic example: capacity 50, expect best value 220.
        a : int = 50
        a : List[Any] = [60, 100, 120]
        a : Optional[int] = [10, 20, 30]
        a : str = len(__UpperCAmelCase)
        self.assertEqual(k.knapsack(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) , 220)
if __name__ == "__main__":
    # Run the test suite when executed directly.
    unittest.main()
| 40
| 1
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def lowercase ( A_ )-> List[str]:
    """Tokenize one dataset example's "content" field for `datasets.map`.

    NOTE(review): mangled source -- the dict and both computed values are all
    bound to ``a``, while the body reads the unbound names ``example`` and
    ``output`` (and passes ``A_`` as ``truncation``); upstream this built and
    returned a dict with the tokenized ids and a chars-per-token ratio --
    verify the exact keys against the original script.
    """
    a : List[str] = {}
    a : Optional[Any] = tokenizer(example["content"] , truncation=A_ )["input_ids"]
    a : Optional[Any] = len(example["content"] ) / len(output["input_ids"] )
    return output
# Script body: parse args, load the dataset, tokenize it in parallel, and
# push the result to the Hub, timing each phase.
# NOTE(review): mangled source -- every result is bound to ``__lowercase``
# while later lines read ``args``/``tokenizer``/``ds``, and `ds.map` is
# passed ``tokenize`` although the function above is named ``lowercase``;
# verify against the original pretokenization script.
__lowercase = HfArgumentParser(PretokenizationArguments)
__lowercase = parser.parse_args()
if args.num_workers is None:
    # Default to one worker per CPU core.
    __lowercase = multiprocessing.cpu_count()
__lowercase = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__lowercase = time.time()
__lowercase = load_dataset(args.dataset_name, split="""train""")
print(f'''Dataset loaded in {time.time()-t_start:.2f}s''')
__lowercase = time.time()
__lowercase = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        """repo_name""",
        """path""",
        """copies""",
        """size""",
        """content""",
        """license""",
        """hash""",
        """line_mean""",
        """line_max""",
        """alpha_frac""",
        """autogenerated""",
    ],
)
print(f'''Dataset tokenized in {time.time()-t_start:.2f}s''')
__lowercase = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 40
|
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
    """Tokenizer tests for LayoutLM (slow + fast tokenizers).

    NOTE(review): mangled source -- the vocab list in setUp is bound to ``a``
    while the writer reads the unbound name ``vocab_tokens``, and the
    input/output texts are bound to ``a`` but returned as
    ``input_text``/``output_text``; verify against the upstream test file.
    """

    UpperCAmelCase : str = LayoutLMTokenizer
    UpperCAmelCase : int = LayoutLMTokenizerFast
    UpperCAmelCase : Union[str, Any] = True
    UpperCAmelCase : Optional[Any] = True

    def __snake_case ( self : Optional[int]):
        """Write a tiny WordPiece vocab file into the temp dir."""
        super().setUp()
        a : Tuple = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def __snake_case ( self : Optional[int] , **__UpperCAmelCase : Tuple):
        # Build a slow tokenizer from the temp-dir vocab.
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase)

    def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str):
        # Sample (input, expected-normalized-output) pair for common tests.
        a : Tuple = "UNwant\u00E9d,running"
        a : Dict = "unwanted, running"
        return input_text, output_text

    def __snake_case ( self : Any):
        """Full tokenizer round-trip: tokenize and convert to ids."""
        a : List[Any] = self.tokenizer_class(self.vocab_file)
        a : str = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(__UpperCAmelCase , ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [7, 4, 5, 10, 8, 9])

    def __snake_case ( self : Dict):
        # Intentionally skipped in upstream as well.
        pass
| 40
| 1
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
# Module-level logger for the token-classification utilities.
__lowercase = logging.getLogger(__name__)
@dataclass
class _A :
    """A single train/test example for token classification.

    NOTE(review): all three fields were mangled to the same name
    ``UpperCAmelCase`` (only the last annotation survives in a dataclass);
    upstream these are guid, words and labels -- verify.
    """

    # guid / words / labels upstream -- TODO confirm
    UpperCAmelCase : str
    UpperCAmelCase : List[str]
    UpperCAmelCase : Optional[List[str]]
@dataclass
class _A :
    """Model-ready features for one example.

    NOTE(review): all four fields were mangled to the same name
    ``UpperCAmelCase``; upstream these are input_ids, attention_mask,
    token_type_ids and label_ids -- verify.
    """

    # input_ids / attention_mask / token_type_ids / label_ids upstream
    UpperCAmelCase : List[int]
    UpperCAmelCase : List[int]
    UpperCAmelCase : Optional[List[int]] = None
    UpperCAmelCase : Optional[List[int]] = None
class _A ( _a ):
    """Dataset split identifiers used to select the data file.

    NOTE(review): the three class attributes were mangled to the same name
    ``UpperCAmelCase`` (only "test" survives); upstream these are
    train/dev/test enum members -- verify.
    """

    UpperCAmelCase : Dict = """train"""
    UpperCAmelCase : Any = """dev"""
    UpperCAmelCase : Tuple = """test"""
class _A :
    """Task interface for token classification (NER-style) datasets.

    NOTE(review): mangled source -- the two abstract staticmethods and the
    feature converter all share the name ``__snake_case``; the converter
    declares many parameters all named ``__UpperCAmelCase`` (a
    duplicate-argument SyntaxError) while its body reads the original
    names (examples, label_list, max_seq_length, tokenizer, cls_token_at_end,
    cls_token, cls_token_segment_id, sep_token, sep_token_extra, pad_on_left,
    pad_token, pad_token_segment_id, pad_token_label_id,
    sequence_a_segment_id, mask_padding_with_zero) which are therefore
    unbound; verify against the upstream utils_ner module.
    """

    @staticmethod
    def __snake_case ( __UpperCAmelCase : str , __UpperCAmelCase : Union[Split, str]):
        # Abstract: read InputExamples for a split from data_dir.
        raise NotImplementedError

    @staticmethod
    def __snake_case ( __UpperCAmelCase : str):
        # Abstract: return the label list for the task.
        raise NotImplementedError

    @staticmethod
    def __snake_case ( __UpperCAmelCase : List[InputExample] , __UpperCAmelCase : List[str] , __UpperCAmelCase : int , __UpperCAmelCase : PreTrainedTokenizer , __UpperCAmelCase : Optional[int]=False , __UpperCAmelCase : str="[CLS]" , __UpperCAmelCase : List[Any]=1 , __UpperCAmelCase : Any="[SEP]" , __UpperCAmelCase : Any=False , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : List[str]=0 , __UpperCAmelCase : Optional[Any]=0 , __UpperCAmelCase : List[Any]=-100 , __UpperCAmelCase : Any=0 , __UpperCAmelCase : Union[str, Any]=True , ):
        """Convert InputExamples to padded, special-token-augmented
        InputFeatures (word-piece tokenization; only the first sub-token of
        each word keeps the real label id)."""
        a : str = {label: i for i, label in enumerate(__UpperCAmelCase)}
        a : Optional[Any] = []
        for ex_index, example in enumerate(__UpperCAmelCase):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d" , __UpperCAmelCase , len(__UpperCAmelCase))
            a : str = []
            a : int = []
            for word, label in zip(example.words , example.labels):
                a : Tuple = tokenizer.tokenize(__UpperCAmelCase)
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(__UpperCAmelCase) > 0:
                    tokens.extend(__UpperCAmelCase)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__UpperCAmelCase) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            a : Dict = tokenizer.num_special_tokens_to_add()
            if len(__UpperCAmelCase) > max_seq_length - special_tokens_count:
                a : Dict = tokens[: (max_seq_length - special_tokens_count)]
                a : str = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            a : Union[str, Any] = [sequence_a_segment_id] * len(__UpperCAmelCase)
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                a : List[str] = [cls_token] + tokens
                a : Tuple = [pad_token_label_id] + label_ids
                a : List[Any] = [cls_token_segment_id] + segment_ids
            a : str = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            a : int = [1 if mask_padding_with_zero else 0] * len(__UpperCAmelCase)
            # Zero-pad up to the sequence length.
            a : str = max_seq_length - len(__UpperCAmelCase)
            if pad_on_left:
                a : Union[str, Any] = ([pad_token] * padding_length) + input_ids
                a : Tuple = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                a : Optional[Any] = ([pad_token_segment_id] * padding_length) + segment_ids
                a : int = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(__UpperCAmelCase) == max_seq_length
            assert len(__UpperCAmelCase) == max_seq_length
            assert len(__UpperCAmelCase) == max_seq_length
            assert len(__UpperCAmelCase) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s" , example.guid)
                logger.info("tokens: %s" , " ".join([str(__UpperCAmelCase) for x in tokens]))
                logger.info("input_ids: %s" , " ".join([str(__UpperCAmelCase) for x in input_ids]))
                logger.info("input_mask: %s" , " ".join([str(__UpperCAmelCase) for x in input_mask]))
                logger.info("segment_ids: %s" , " ".join([str(__UpperCAmelCase) for x in segment_ids]))
                logger.info("label_ids: %s" , " ".join([str(__UpperCAmelCase) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                a : Optional[Any] = None
            features.append(
                InputFeatures(
                    input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , label_ids=__UpperCAmelCase))
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class _A ( _a ):
    """Torch ``Dataset`` of token-classification features, built once and cached on disk.

    NOTE(review): names below are reconstructed from the obfuscated original
    (duplicate ``__UpperCAmelCase`` parameters are a SyntaxError, and several
    locals/attributes were referenced but never assigned) — confirm against the
    upstream `transformers` token-classification example (``utils_ner.py``).
    """

    features: List[InputFeatures]
    # Label id that the loss function ignores (masks padding / sub-word tokens).
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache: bool = False,
        mode: Split = Split.train,
    ):
        # Load data features from cache or dataset file.
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the
        # dataset; the others will read the cache it creates.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["xlnet"]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                    sep_token=tokenizer.sep_token,
                    # NOTE(review): the obfuscated source passed an unrelated local
                    # here; upstream uses False (no extra SEP token) — confirm.
                    sep_token_extra=False,
                    pad_on_left=bool(tokenizer.padding_side == "left"),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        # The obfuscated original returned ``self.features[i]`` while the
        # parameter had a different name — that NameError is fixed here.
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class _A :
    """TensorFlow dataset of token-classification features, exposed as a ``tf.data.Dataset``.

    NOTE(review): names reconstructed from the obfuscated original (duplicate
    parameter names are a SyntaxError; ``self.features`` / ``self.dataset``
    were referenced but never assigned) — confirm against the upstream
    `transformers` token-classification example (``utils_ner.py``).
    """

    features: List[InputFeatures]
    # -100 is the conventional "ignore" label id for token-classification losses.
    pad_token_label_id: int = -100

    def __init__(
        self,
        token_classification_task: TokenClassificationTask,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache: bool = False,
        mode: Split = Split.train,
    ):
        examples = token_classification_task.read_examples_from_file(data_dir, mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ["xlnet"]),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            # NOTE(review): upstream passes False here — confirm.
            sep_token_extra=False,
            pad_on_left=bool(tokenizer.padding_side == "left"),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

        def gen():
            # Yield (inputs, labels) pairs in the structure tf.data expects.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        # NOTE(review): the obfuscated source wrote ``tf.intaa`` for every dtype;
        # upstream uses tf.int32 for inputs and tf.int64 for labels — confirm.
        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                (
                    {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                    tf.TensorShape([None]),
                ),
            )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32},
                    tf.int64,
                ),
                (
                    {
                        "input_ids": tf.TensorShape([None]),
                        "attention_mask": tf.TensorShape([None]),
                        "token_type_ids": tf.TensorShape([None]),
                    },
                    tf.TensorShape([None]),
                ),
            )

    def __snake_case(self):
        # Attach the known cardinality so downstream size-aware ops work.  The
        # original discarded the result of ``apply`` and returned the raw dataset.
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        # Fixes the NameError in the obfuscated original (parameter vs ``i``).
        return self.features[i]
| 40
|
"""simple docstring"""
def lowercase(A_) -> str:
    """Return the binary representation of an integer as a ``0b``-prefixed string.

    Negative numbers are rendered with a leading ``-`` (e.g. ``-5 -> '-0b101'``).

    Raises:
        TypeError: if the argument is a ``float`` or a ``str``.

    >>> lowercase(0)
    '0b0'
    >>> lowercase(5)
    '0b101'
    >>> lowercase(-37)
    '-0b100101'
    """
    num = A_
    # The original tested ``isinstance(num, num)``, which raises TypeError for
    # the wrong reason; test against the intended types instead.
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = num < 0
    if negative:
        num = -num
    bits: list[int] = []
    while num > 0:
        bits.insert(0, num % 2)
        num >>= 1
    # The original joined ``str(A_)`` (the whole argument) for every position
    # instead of each computed bit.
    prefix = "-0b" if negative else "0b"
    return prefix + "".join(str(bit) for bit in bits)
if __name__ == "__main__":
    # Execute the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""facebook/timesformer""": """https://huggingface.co/facebook/timesformer/resolve/main/config.json""",
}
class _A ( _a ):
    """Configuration for the TimeSformer video model (stores architecture hyper-parameters).

    NOTE(review): parameter names reconstructed from the attribute assignments
    in the obfuscated original, whose duplicated parameter names are a
    SyntaxError — confirm against ``transformers.TimesformerConfig``.
    """

    # Identifier used by the auto-config machinery.
    model_type = "timesformer"

    def __init__(
        self,
        image_size: int = 224,
        patch_size: int = 16,
        num_channels: int = 3,
        num_frames: int = 8,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-6,
        qkv_bias: bool = True,
        attention_type: str = "divided_space_time",
        drop_path_rate: float = 0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 40
|
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def lowercase(vl, wt, w, n):
    """Fractional-knapsack value: best total value achievable with capacity ``w``.

    Args:
        vl: item values.
        wt: item weights (parallel to ``vl``).
        w: knapsack capacity.
        n: number of items (``len(vl)``).

    Returns:
        Maximum achievable value, taking a fraction of the first item that
        does not fully fit.

    >>> lowercase([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    # Sort items by value density (value/weight), best first.  The obfuscated
    # original's key lambda referenced an undefined name and ``reverse``/
    # ``bisect`` received the wrong arguments; all repaired here.
    ranked = sorted(zip(vl, wt), key=lambda item: item[0] / item[1], reverse=True)
    vl, wt = [i[0] for i in ranked], [i[1] for i in ranked]
    # Prefix sums of weights; ``k`` = number of items that fit completely.
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
    # Execute the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 40
| 1
|
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowercase(monkeypatch):
    """Reset the set of already-emitted deprecation warnings before each test.

    NOTE(review): the obfuscated original named the parameter ``A_`` while the
    body read ``monkeypatch``; the name must be ``monkeypatch`` for pytest's
    fixture injection to supply it.
    """
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())
@pytest.fixture
def lowercase(monkeypatch):
    """Patch ``datasets.inspect.huggingface_hub`` with a mock exposing a fixed metric list.

    NOTE(review): in the obfuscated original the two mock classes and the
    ``monkeypatch.setattr`` call sat at module level (NameError on import);
    they are restored here as the fixture's locals — confirm against the
    upstream `datasets` deprecation test.
    """

    class MetricMock:
        def __init__(self, metric_id):
            # NOTE(review): the original only stored the id; the attribute name
            # is inferred — confirm what ``datasets.inspect`` reads from it.
            self.id = metric_id

    class HfhMock:
        _metrics = [
            MetricMock(metric_id)
            for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]
        ]

        # NOTE(review): method name inferred from ``self._metrics`` accessor in
        # the original (obfuscated to ``__snake_case``) — confirm it matches
        # what ``datasets.inspect`` calls on the hub object.
        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())
@pytest.mark.parametrize(
    "func, args",
    [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))],
)
def lowercase(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    """Each deprecated metric entry point should emit a FutureWarning pointing at `evaluate`.

    NOTE(review): the obfuscated original reused one parameter name five times
    (a SyntaxError); the fixture parameter names here are inferred and must
    match the actual fixture function names in this file — confirm.
    """
    # Substitute the real tmp_path fixture value for the "tmp_path" placeholder.
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    # NOTE(review): warning class inferred — `datasets` deprecations raise
    # FutureWarning; the original passed an undefined name here.
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 40
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def lowercase(magnitude, angle, radian_mode=False):
    """Resolve a polar force into its Cartesian ``[fx, fy]`` components.

    Args:
        magnitude: length of the force vector.
        angle: direction of the force; degrees by default, radians when
            ``radian_mode`` is True.
        radian_mode: interpret ``angle`` as radians instead of degrees.

    Returns:
        ``[fx, fy]`` components of the force.
    """
    # NOTE(review): parameter names reconstructed from the body of the
    # obfuscated original, whose duplicated parameter names are a SyntaxError.
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]
def lowercase(forces, location, eps=10**-1):
    """Return True when the net moment of ``forces`` applied at ``location`` is ~0.

    Args:
        forces: array of 2D force vectors.
        location: array of 2D application points, parallel to ``forces``.
        eps: tolerance on the magnitude of the summed moment.

    Returns:
        True if the system is in static (rotational) equilibrium.
    """
    # NOTE(review): parameter names reconstructed — the obfuscated original
    # reused one name for all three parameters (a SyntaxError).
    # Moment of each force about the origin: z-component of r x F.
    moments = cross(location, forces)
    total_moment = sum(moments)
    return abs(total_moment) < eps
if __name__ == "__main__":
    # Test to check if it works
    # NOTE(review): `polar_force` and `in_static_equilibrium` are not defined in
    # this file (the corresponding functions above are named `lowercase`), every
    # result is rebound to the same `__lowercase` name, and the asserts read
    # undefined `forces`/`location` — this block cannot run as written; confirm
    # the intended names against the upstream static-equilibrium example.
    __lowercase = array(
        [
            polar_force(7_18.4, 180 - 30),
            polar_force(8_79.54, 45),
            polar_force(100, -90),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem 1 in image_data/2D_problems.jpg
    __lowercase = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    __lowercase = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)
    # Problem in image_data/2D_problems_1.jpg
    __lowercase = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    __lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)
    # Also run this module's doctests.
    import doctest
    doctest.testmod()
| 40
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.