| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""
Mandelbrot set image generator.
"""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step/max_step) after which the complex
    number constituted by this x-y-pair diverges. Members of the Mandelbrot
    set do not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """
    Black & white color-coding: the Mandelbrot set is black, everything else
    is white.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """
    Color-coding that takes the relative distance into account; the Mandelbrot
    set itself is black.
    """
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """
    Generate an image of the Mandelbrot set.
    """
    img = Image.new('RGB', (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
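# A minimal sanity-check sketch for the helpers above (the values follow
# directly from step / (max_step - 1) and the HSV color wheel):
#
#     get_distance(0, 0, 50)   # -> 1.0, the origin never diverges
#     get_distance(5, 5, 50)   # -> 0.0, diverges on the first step
#     get_color_coded_rgb(1)   # -> (0, 0, 0), set members are black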
| 517
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 5_0 ) -> int:
'''simple docstring'''
lowercase = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 359
| 0
|
import json
import os
import unittest

from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
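# To run just this test file (assuming a transformers source checkout):
#
#     python -m pytest tests/models/ctrl/test_tokenization_ctrl.py -k test_full_tokenizer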
| 716
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'

# import ONNX file
if not os.path.exists('temp_engine'):
    os.makedirs('temp_engine')

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, 'rb') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, 'wb') as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info('  Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
| 27
| 0
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-1'''
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-2'''
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-3'''
SCREAMING_SNAKE_CASE_ = '''CompVis/stable-diffusion-v1-4'''
class lowerCAmelCase_ ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = True , ) -> Dict:
super()._init_()
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__lowercase )
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__lowercase )
__lowerCAmelCase = StableDiffusionPipeline.from_pretrained(__lowercase )
__lowerCAmelCase = StableDiffusionPipeline(
vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , unet=__lowercase , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , requires_safety_checker=__lowercase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A__ ( self ) -> Dict:
return {k: getattr(self , __lowercase ) for k in self.config.keys() if not k.startswith("""_""" )}
def A__ ( self , snake_case_ = "auto" ) -> str:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCAmelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowercase )
def A__ ( self ) -> Union[str, Any]:
self.enable_attention_slicing(__lowercase )
@torch.no_grad()
def A__ ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> List[Any]:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def A__ ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> List[str]:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def A__ ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> List[str]:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def A__ ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> Tuple:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def A__ ( self , snake_case_ , snake_case_ = 512 , snake_case_ = 512 , snake_case_ = 50 , snake_case_ = 7.5 , snake_case_ = None , snake_case_ = 1 , snake_case_ = 0.0 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , snake_case_ = None , snake_case_ = 1 , **snake_case_ , ) -> Optional[Any]:
__lowerCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(__lowercase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.2
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.3
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.4
__lowerCAmelCase = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
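# Hedged usage sketch: in diffusers this class is normally consumed as a
# community pipeline; the custom_pipeline identifier below is an assumption.
#
#     pipe = DiffusionPipeline.from_pretrained(
#         'CompVis/stable-diffusion-v1-4', custom_pipeline='stable_diffusion_comparison'
#     )
#     images = pipe('a photograph of an astronaut riding a horse').images  # one image per checkpoint v1-1..v1-4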
| 465
|
def solution(pence: int = 200) -> int:
    """
    Project Euler problem 31: count the number of ways 200 pence can be made
    using any number of standard UK coins.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
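# Smaller hand-checkable case: 5 pence can be formed four ways
# (1+1+1+1+1, 1+1+1+2, 1+2+2, 5), which the same DP reproduces.
#
#     solution(5)  # -> 4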
| 252
| 0
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
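# Worked example for the helper above: a 480x640 input targeted at 384x384 with
# keep_aspect_ratio=True and multiple=1. scale_height = 384/480 = 0.8 and
# scale_width = 384/640 = 0.6; fitting the height (the smaller deviation from 1)
# sets both scales to 0.8, so the result is (384, 512).
#
#     get_resize_output_image_size(image, (384, 384), keep_aspect_ratio=True, multiple=1)
#     # -> (384, 512) for a 480x640 `image`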
class DPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DPT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
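# Hedged usage sketch (standard image-processor calling convention; the file
# name is illustrative):
#
#     from PIL import Image
#     processor = DPTImageProcessor(size={"height": 384, "width": 384})
#     batch = processor(images=Image.open("example.jpg"), return_tensors="pt")
#     batch["pixel_values"].shape  # torch.Size([1, 3, 384, 384])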
| 504
|
import torch


def main():
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 504
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =AltDiffusionPipeline
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_IMAGE_PARAMS
SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_IMAGE_PARAMS
def __a ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase__ : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
UpperCAmelCase__ : int = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case__ , set_alpha_to_one=snake_case__ , )
torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
UpperCAmelCase__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_2 , )
UpperCAmelCase__ : List[Any] = CLIPTextModel(snake_case__ )
UpperCAmelCase__ : List[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
UpperCAmelCase__ : str = 7_7
UpperCAmelCase__ : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __a ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any]=0 ):
'''simple docstring'''
if str(snake_case__ ).startswith("mps" ):
UpperCAmelCase__ : Any = torch.manual_seed(snake_case__ )
else:
UpperCAmelCase__ : Any = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
UpperCAmelCase__ : int = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __a ( self : Tuple ):
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def __a ( self : List[Any] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Dict = self.get_dummy_components()
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase__ : Dict = RobertaSeriesModelWithTransformation(snake_case__ )
UpperCAmelCase__ : Any = text_encoder
UpperCAmelCase__ : Optional[int] = AltDiffusionPipeline(**snake_case__ )
UpperCAmelCase__ : List[str] = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase__ : Union[str, Any] = "A photo of an astronaut"
UpperCAmelCase__ : str = alt_pipe(**snake_case__ )
UpperCAmelCase__ : Any = output.images
UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase__ : Tuple = np.array(
[0.574_8162, 0.6044_7145, 0.4882_1217, 0.5010_0636, 0.543_1185, 0.4576_3683, 0.4965_7696, 0.4813_2733, 0.4757_3093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : Dict = PNDMScheduler(skip_prk_steps=snake_case__ )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5_0_0_2 , )
# TODO: remove after fixing the non-deterministic text encoder
UpperCAmelCase__ : Optional[int] = RobertaSeriesModelWithTransformation(snake_case__ )
UpperCAmelCase__ : str = text_encoder
UpperCAmelCase__ : Tuple = AltDiffusionPipeline(**snake_case__ )
UpperCAmelCase__ : Tuple = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(snake_case__ )
UpperCAmelCase__ : Tuple = alt_pipe(**snake_case__ )
UpperCAmelCase__ : Optional[Any] = output.images
UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
UpperCAmelCase__ : Any = np.array(
[0.5160_5093, 0.570_7241, 0.4736_5507, 0.5057_8886, 0.563_3877, 0.464_2503, 0.518_2081, 0.4876_3484, 0.4908_4237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __a ( self : Optional[int] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Optional[int] ):
'''simple docstring'''
# make sure here that pndm scheduler skips prk
UpperCAmelCase__ : Optional[int] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , safety_checker=snake_case__ )
UpperCAmelCase__ : List[Any] = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : Optional[Any] = "A painting of a squirrel eating a burger"
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Union[str, Any] = alt_pipe([prompt] , generator=snake_case__ , guidance_scale=6.0 , num_inference_steps=2_0 , output_type="np" )
UpperCAmelCase__ : List[str] = output.images
UpperCAmelCase__ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ : Optional[int] = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = DDIMScheduler.from_pretrained("BAAI/AltDiffusion" , subfolder="scheduler" )
UpperCAmelCase__ : Union[str, Any] = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion" , scheduler=snake_case__ , safety_checker=snake_case__ )
UpperCAmelCase__ : Any = alt_pipe.to(snake_case__ )
alt_pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase__ : str = "A painting of a squirrel eating a burger"
UpperCAmelCase__ : Any = torch.manual_seed(0 )
UpperCAmelCase__ : str = alt_pipe([prompt] , generator=snake_case__ , num_inference_steps=2 , output_type="numpy" )
UpperCAmelCase__ : Union[str, Any] = output.images
UpperCAmelCase__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
UpperCAmelCase__ : Union[str, Any] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 438
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"""uw-madison/mra-base-512-4""": """https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json""",
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ ='''mra'''
def __init__( self : Any , snake_case__ : List[str]=5_0_2_6_5 , snake_case__ : Any=7_6_8 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : str="gelu" , snake_case__ : Any=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : Union[str, Any]=1 , snake_case__ : List[Any]=0.02 , snake_case__ : str=1e-5 , snake_case__ : List[Any]="absolute" , snake_case__ : str=4 , snake_case__ : List[str]="full" , snake_case__ : Tuple=0 , snake_case__ : Any=0 , snake_case__ : Union[str, Any]=1 , snake_case__ : int=0 , snake_case__ : int=2 , **snake_case__ : List[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
UpperCAmelCase__ : List[Any] = vocab_size
UpperCAmelCase__ : str = max_position_embeddings
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : str = num_attention_heads
UpperCAmelCase__ : int = intermediate_size
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : List[str] = hidden_dropout_prob
UpperCAmelCase__ : List[str] = attention_probs_dropout_prob
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Dict = layer_norm_eps
UpperCAmelCase__ : Tuple = position_embedding_type
UpperCAmelCase__ : List[str] = block_per_row
UpperCAmelCase__ : Optional[Any] = approx_mode
UpperCAmelCase__ : Any = initial_prior_first_n_blocks
UpperCAmelCase__ : List[Any] = initial_prior_diagonal_n_blocks
| 438
| 1
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 705
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE_ = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 370
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a__ : List[str] = {"""configuration_yolos""": ["""YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """YolosConfig""", """YolosOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ["""YolosFeatureExtractor"""]
a__ : str = ["""YolosImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
"""YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""YolosForObjectDetection""",
"""YolosModel""",
"""YolosPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
a__ : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 589
|
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Any = logging.get_logger(__name__)
a__ : Tuple = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class __magic_name__ ( _UpperCamelCase ):
UpperCamelCase : Tuple = "encodec"
def __init__( self , __magic_name__=[1.5, 3.0, 6.0, 12.0, 24.0] , __magic_name__=2_4_0_0_0 , __magic_name__=1 , __magic_name__=False , __magic_name__=None , __magic_name__=None , __magic_name__=1_2_8 , __magic_name__=3_2 , __magic_name__=1 , __magic_name__=[8, 5, 4, 2] , __magic_name__="weight_norm" , __magic_name__=7 , __magic_name__=7 , __magic_name__=3 , __magic_name__=2 , __magic_name__=True , __magic_name__="reflect" , __magic_name__=2 , __magic_name__=2 , __magic_name__=1.0 , __magic_name__=1_0_2_4 , __magic_name__=None , __magic_name__=True , **__magic_name__ , ):
"""simple docstring"""
_lowerCAmelCase = target_bandwidths
_lowerCAmelCase = sampling_rate
_lowerCAmelCase = audio_channels
_lowerCAmelCase = normalize
_lowerCAmelCase = chunk_length_s
_lowerCAmelCase = overlap
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_filters
_lowerCAmelCase = num_residual_layers
_lowerCAmelCase = upsampling_ratios
_lowerCAmelCase = norm_type
_lowerCAmelCase = kernel_size
_lowerCAmelCase = last_kernel_size
_lowerCAmelCase = residual_kernel_size
_lowerCAmelCase = dilation_growth_rate
_lowerCAmelCase = use_causal_conv
_lowerCAmelCase = pad_mode
_lowerCAmelCase = compress
_lowerCAmelCase = num_lstm_layers
_lowerCAmelCase = trim_right_ratio
_lowerCAmelCase = codebook_size
_lowerCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
_lowerCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}''' )
super().__init__(**__magic_name__ )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowerCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0) )
| 589
| 1
|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721
|
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["politics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")

        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")

        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")

        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)

        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )

        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100, candidate_labels=["politics", "public health", "science"]
        )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="pt",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification",
            model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
            framework="tf",
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            },
        )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=a__ , )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )

        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=a__ , )
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 291
| 0
|
from __future__ import annotations


def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """
    Sort the left half and right half individually, then merge them into `input_list`.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """
    Return a sorted copy of `input_list` using iterative (bottom-up) merge sort.
    """
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
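# Added sanity check (illustration, not in the original script): with eight elements
# the pass width p doubles 2 -> 4 -> 8, merging runs of two, then four, then the two
# final halves.
#
#   >>> iter_merge_sort([5, 9, 8, 7, 1, 2, 7, 0])
#   [0, 1, 2, 5, 7, 7, 8, 9]
#   >>> iter_merge_sort([1])
#   [1]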
| 699
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
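# Example launch (added illustration; the CSV paths and column index are placeholders,
# and the file name assumes the script is saved as run_tf_text_classification.py).
# HfArgumentParser turns each dataclass field above into a matching --flag:
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv \
#       --dev_file dev.csv \
#       --label_column_id 0 \
#       --output_dir ./tf-text-clf \
#       --do_train --do_eval
#
# Each CSV needs a header row; --label_column_id selects which column holds the label,
# and the remaining one or two columns are fed to the tokenizer.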
| 699
| 1
|
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than the length of the pattern string,
        # that means this index is the starting position of a substring
        # which is equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
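# Added illustration: each z_result[i] is the length of the longest common prefix of
# the whole string and its suffix starting at position i, e.g.
#
#   >>> z_function("aaaa")
#   [0, 3, 2, 1]
#   >>> find_pattern("abr", "abracadabra")  # "abr" occurs at indices 0 and 7
#   2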
| 38
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BlenderbotSmall tokenizer (backed by HuggingFace's tokenizers library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences. BlenderbotSmall does not make use of
        token type ids, so a list of zeros is returned.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 1
|
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """
    Zero-shot audio classification pipeline: you provide an audio and a set of
    `candidate_labels`, and the pipeline predicts the most likely label for the audio.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
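# Minimal usage sketch (added; the CLAP checkpoint name is an assumption about a model
# this pipeline is commonly paired with, and `audio` must be a 1-D numpy array):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])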
| 412
|
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    input_str = input_str.replace(" ", "")  # Replace all the whitespace in our sentence
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """
    Benchmark code comparing the different versions.
    """
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
    # 5.348480500048026, 2.6477354579837993, 1.8470395830227062
    # 5.036091582966037, 2.644472333951853, 1.8869528750656173


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 412
| 1
|
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
        tgt_text = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 718
|
from __future__ import annotations


def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    Return a list containing all the possible combinations in which the string
    `target` can be constructed from the given list of substrings `word_bank`.
    """

    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 358
| 0
|
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=32 , __a=3 , __a=4 , __a=[10, 20, 30, 40] , __a=[2, 2, 3, 2] , __a=True , __a=True , __a=37 , __a="gelu" , __a=10 , __a=0.02 , __a=["stage2", "stage3", "stage4"] , __a=3 , __a=None , ) -> Optional[Any]:
'''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = num_stages
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels, )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
return
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(__a)
_UpperCamelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase = [*signature.parameters.keys()]
_UpperCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __a)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__a)
@unittest.skip(reason='''UperNet does not use inputs_embeds''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''')
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='''UperNet does not have a base model''')
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''')
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
pass
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a):
_UpperCamelCase = model_class(__a)
model.to(__a)
model.eval()
with torch.no_grad():
_UpperCamelCase = model(**self._prepare_for_class(__a , __a))
_UpperCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase = self.model_tester.num_stages
self.assertEqual(len(__a) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase = True
check_hidden_states_output(__a , __a , __a)
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = _config_zero_init(__a)
_UpperCamelCase = _config_zero_init(configs_no_init.backbone_config)
for model_class in self.all_model_classes:
_UpperCamelCase = model_class(config=__a)
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip(reason='''UperNet does not have tied weights''')
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
pass
@slow
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = UperNetForSemanticSegmentation.from_pretrained(__a)
self.assertIsNotNone(__a)
def prepare_img():
    """Load the ADE20k fixture image used by the integration tests below."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg" )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 19
|
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2
    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
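

# Example invocation (hypothetical file paths; the script name is whatever this
# file is saved as):
#
#     python convert_yoso_checkpoint.py \
#         --pytorch_model_path ./yoso.ckpt \
#         --config_file ./yoso_config.json \
#         --pytorch_dump_path ./yoso-hf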
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 195
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 228
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []
    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])
    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())
    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)
    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)
    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether to make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
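
# Example invocation (hypothetical paths and script name), run from the repo root:
#
#     python utils/check_tf_ops.py --saved_model_path ./export/saved_model.pb --opset 12 --strict
#
# With --strict the script raises on the first incompatible op; without it the
# offending ops are only printed.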
| 228
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 71
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that turns raw audio into log-mel spectrogram features."""

    model_input_names = ["input_features"]

    def __init__(self, feature_size=80, sampling_rate=16_000, hop_length=160, chunk_length=30, n_fft=400, padding_value=0.0, return_attention_mask=False, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__( self, raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], truncation: bool = True, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, padding: Optional[str] = "max_length", max_length: Optional[int] = None, sampling_rate: Optional[int] = None, do_normalize: Optional[bool] = None, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug." )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value, )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
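

# Minimal usage sketch (synthetic one-second 16 kHz tone; illustrative only):
#
#     import numpy as np
#     extractor = WhisperFeatureExtractor()
#     audio = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 16_000)).astype(np.float32)
#     features = extractor(audio, sampling_rate=16_000, return_tensors="np")
#     # one log-mel spectrogram of shape (batch, feature_size, nb_max_frames)
#     print(features["input_features"].shape)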
| 222
| 0
|
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(_A ):
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = FlaxAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def A__ ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(_A ):
__lowerCAmelCase = AutoConfig.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
__lowerCAmelCase = FlaxAutoModel.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , _A )
@slow
def A__ ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
__lowerCAmelCase = AutoTokenizer.from_pretrained(_A )
__lowerCAmelCase = FlaxBertModel.from_pretrained(_A )
__lowerCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_A ):
return model(**_A )
eval(**_A ).block_until_ready()
@slow
def A__ ( self ):
for model_name in ["roberta-base", "roberta-large"]:
__lowerCAmelCase = AutoTokenizer.from_pretrained(_A )
__lowerCAmelCase = FlaxRobertaModel.from_pretrained(_A )
__lowerCAmelCase = tokenizer('Do you support jax jitted function?' , return_tensors=TensorType.JAX )
@jax.jit
def eval(**_A ):
return model(**_A )
eval(**_A ).block_until_ready()
def A__ ( self ):
with self.assertRaisesRegex(
_A , 'bert-base is not a local folder and is not a valid model identifier' ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained('bert-base' )
def A__ ( self ):
with self.assertRaisesRegex(
_A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained(_A , revision='aaaaaa' )
def A__ ( self ):
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack' , ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/config-no-model' )
def A__ ( self ):
with self.assertRaisesRegex(_A , 'Use `from_pt=True` to load this model' ):
__lowerCAmelCase = FlaxAutoModel.from_pretrained('hf-internal-testing/tiny-bert-pt-only' )
| 102
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    """Variational autoencoder with KL loss for encoding images into latents and decoding them back."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215, ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True, )
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn, )
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None, ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
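

# Minimal round-trip sketch with random data and the default config (illustrative only):
#
#     vae = AutoencoderKL()
#     image = torch.randn(1, 3, 32, 32)
#     posterior = vae.encode(image).latent_dist
#     latents = posterior.sample()
#     reconstruction = vae.decode(latents).sample
#     print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])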
| 102
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    """Abstract contract that every CLI subcommand implements."""

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
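

# Hypothetical concrete command sketch illustrating the contract above (names
# invented for the example, not part of the real CLI):
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser):
#             # `parser` is the object returned by ArgumentParser.add_subparsers()
#             sub = parser.add_parser("hello")
#             sub.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello from the CLI")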
| 594
|
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Tuple=3_2 , SCREAMING_SNAKE_CASE_ : Dict=1_0 , SCREAMING_SNAKE_CASE_ : Any=1_0_0 , SCREAMING_SNAKE_CASE_ : Any=1_0_2_6 , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : List[Any]="data/tokenized_stories_train_wikitext103.jbl" , SCREAMING_SNAKE_CASE_ : Optional[Any]="igf_context_pairs.jbl" , ) -> Dict:
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Union[str, Any] = generate_datasets(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , number=SCREAMING_SNAKE_CASE_ , min_len=1_0_2_6 , trim=SCREAMING_SNAKE_CASE_ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
SCREAMING_SNAKE_CASE_ : List[str] = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
SCREAMING_SNAKE_CASE_ : List[Any] = load_gpta("gpt2" ).to(SCREAMING_SNAKE_CASE_ )
print("computing perplexity on objective set" )
SCREAMING_SNAKE_CASE_ : Tuple = compute_perplexity(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).item()
print("perplexity on objective set:" , SCREAMING_SNAKE_CASE_ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str=1_5 , SCREAMING_SNAKE_CASE_ : int=1_2_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1_0_0 , SCREAMING_SNAKE_CASE_ : Any="igf_model.pt" , ) -> List[str]:
"""simple docstring"""
set_seed(4_2 )
# Load pre-trained model
SCREAMING_SNAKE_CASE_ : Optional[Any] = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
SCREAMING_SNAKE_CASE_ : str = SecondaryLearner(SCREAMING_SNAKE_CASE_ )
# Train secondary learner
SCREAMING_SNAKE_CASE_ : List[str] = train_secondary_learner(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , max_epochs=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , eval_freq=1_0_0 , igf_model_path=SCREAMING_SNAKE_CASE_ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_0_0_0 , SCREAMING_SNAKE_CASE_ : Tuple=1_6 , SCREAMING_SNAKE_CASE_ : Dict=1.0 , SCREAMING_SNAKE_CASE_ : List[Any]=recopy_gpta , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , SCREAMING_SNAKE_CASE_ : int=1_0 , SCREAMING_SNAKE_CASE_ : List[Any]="gpt2_finetuned.pt" , ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
SCREAMING_SNAKE_CASE_ : List[Any] = RandomSampler(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Dict = DataLoader(SCREAMING_SNAKE_CASE_ , sampler=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Tuple = max_steps // (len(SCREAMING_SNAKE_CASE_ )) + 1
SCREAMING_SNAKE_CASE_ : int = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.zeros((1, context_len) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : List[Any] = recopy_model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.train()
if secondary_learner is not None:
secondary_learner.to(SCREAMING_SNAKE_CASE_ )
secondary_learner.eval()
SCREAMING_SNAKE_CASE_ : Tuple = []
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : Any = []
# Compute the performance of the transformer model at the beginning
SCREAMING_SNAKE_CASE_ : Tuple = compute_perplexity(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
test_perps.append(SCREAMING_SNAKE_CASE_ )
print("Test perplexity, step" , SCREAMING_SNAKE_CASE_ , ":" , SCREAMING_SNAKE_CASE_ )
for epoch in range(int(SCREAMING_SNAKE_CASE_ ) ):
for step, example in enumerate(SCREAMING_SNAKE_CASE_ ):
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ : List[Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
SCREAMING_SNAKE_CASE_ : Optional[int] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
SCREAMING_SNAKE_CASE_ : Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = True
if secondary_learner is not None:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = secondary_learner.forward(
torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(SCREAMING_SNAKE_CASE_ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
SCREAMING_SNAKE_CASE_ : str = -1
if predicted_q < threshold:
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
SCREAMING_SNAKE_CASE_ : List[str] = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
SCREAMING_SNAKE_CASE_ : Optional[int] = compute_perplexity(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
test_perps.append(SCREAMING_SNAKE_CASE_ )
print("Test perplexity, step" , SCREAMING_SNAKE_CASE_ , ":" , SCREAMING_SNAKE_CASE_ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __lowerCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=3_2 , type=SCREAMING_SNAKE_CASE_ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=1_0_0 , type=SCREAMING_SNAKE_CASE_ , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=1_0_0 , type=SCREAMING_SNAKE_CASE_ , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1_0_0_0 , type=SCREAMING_SNAKE_CASE_ , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=1_2_8 , type=SCREAMING_SNAKE_CASE_ , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=1_6 , type=SCREAMING_SNAKE_CASE_ , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=1_0 , type=SCREAMING_SNAKE_CASE_ , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=1_0_0 , type=SCREAMING_SNAKE_CASE_ , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1_0_2_6 , type=SCREAMING_SNAKE_CASE_ , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=1_5 , type=SCREAMING_SNAKE_CASE_ , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=SCREAMING_SNAKE_CASE_ , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=SCREAMING_SNAKE_CASE_ , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=SCREAMING_SNAKE_CASE_ , type=SCREAMING_SNAKE_CASE_ , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=SCREAMING_SNAKE_CASE_ , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
SCREAMING_SNAKE_CASE_ : Any = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
SCREAMING_SNAKE_CASE_ : str = training_secondary_learner(
SCREAMING_SNAKE_CASE_ , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
SCREAMING_SNAKE_CASE_ : Optional[int] = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ : Any = generate_datasets(
context_len=3_2 , file="data/tokenized_stories_train_wikitext103.jbl" , number=1_0_0 , min_len=1_0_2_6 , trim=SCREAMING_SNAKE_CASE_ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=SCREAMING_SNAKE_CASE_ , secondary_learner=SCREAMING_SNAKE_CASE_ , eval_interval=1_0 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 421
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """Wraps a ViLT image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__( self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs, ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 708
|
'''simple docstring'''
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
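
# With both qubits flipped by the X gates, an ideal (noise-free) simulation
# measures "11" on every shot, so the expected histogram is simply:
#
#     {'11': 1000}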
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
| 425
| 0
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
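
# Sketch of the expected input shape (hypothetical values):
#
#     {"benchmarks/read": {"time_s": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
#
# renders as a collapsible Markdown section containing:
#
#     ### Benchmark: read
#     | metric | time_s |
#     |--------|---|
#     | new / old (diff) | 1.200000 / 1.500000 (-0.300000) |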
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 40
|
'''simple docstring'''
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Return True if the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
    return bool(re.search(pattern, phone))
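

# A few illustrative checks against the pattern above:
#
#     assert is_sri_lankan_phone_number("+94773283048")
#     assert is_sri_lankan_phone_number("0718382399")
#     assert not is_sri_lankan_phone_number("0912343221")  # 9x is not a valid mobile prefix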
if __name__ == "__main__":
    phone = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 685
| 0
|
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursively bubble-sort `list_data` in place and return it."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
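

# Illustrative checks (the recursion stops as soon as a full pass makes no swap):
#
#     assert bubble_sort([0, 5, 2, 3, 2]) == [0, 2, 2, 3, 5]
#     assert bubble_sort([]) == []
#     assert bubble_sort(["d", "a", "b"]) == ["a", "b", "d"]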
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class __lowercase ( enum.Enum ):
_lowerCAmelCase = 0
_lowerCAmelCase = 1
_lowerCAmelCase = 2
@add_end_docstrings(a__ )
class __lowercase ( a__ ):
_lowerCAmelCase = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
def __init__( self : Tuple , *lowercase__ : Tuple , **lowercase__ : Any ):
super().__init__(*lowercase__ , **lowercase__ )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
a_ = None
if self.model.config.prefix is not None:
a_ = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
a_ = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
a_ , a_ , a_ = self._sanitize_parameters(prefix=lowercase__ , **self._forward_params )
a_ = {**self._preprocess_params, **preprocess_params}
            self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
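    # A minimal usage sketch for the methods above, assuming the standard
    # `transformers.pipeline` factory; the checkpoint name is illustrative.
    #
    #     from transformers import pipeline
    #
    #     generator = pipeline("text-generation", model="gpt2")
    #     # return_full_text=False maps to ReturnType.NEW_TEXT in
    #     # _sanitize_parameters, so only the new continuation is returned.
    #     out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
    #     print(out[0]["generated_text"])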
| 143
| 0
|
"""
Implementation of gradient descent algorithm for minimizing cost of a linear hypothesis
function.
"""
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
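# For reference, get_cost_derivative computes the partial derivative of the
# squared-error cost of the linear hypothesis
#     h(x) = theta_0 + theta_1 * x_1 + theta_2 * x_2 + theta_3 * x_3
# with respect to parameter j:
#     dJ/dtheta_j = (1/m) * sum_i (h(x_i) - y_i) * x_ij
# where x_i0 == 1 for every example, which is why index == -1 selects the
# bias term in summation_of_cost_derivative above.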
| 90
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."},
    )
def __UpperCAmelCase ( A : str , A : Optional[Any] , A : List[Any] , A : Any , A : Union[str, Any] , A : Dict ) -> int:
UpperCAmelCase_ : Tuple = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCAmelCase_ : Tuple = dataset.filter(lambda A : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCAmelCase_ : Dict = int(eval_result * len(A ) )
print(A )
UpperCAmelCase_ : Optional[int] = dataset.sort('''probability''' , reverse=A )
UpperCAmelCase_ : Optional[Any] = dataset.select(range(A ) )
UpperCAmelCase_ : Dict = dataset.remove_columns(['''label''', '''probability'''] )
UpperCAmelCase_ : List[Any] = dataset.rename_column('''prediction''' , '''label''' )
UpperCAmelCase_ : List[str] = dataset.map(lambda A : {"label": idalabel[example["label"]]} )
UpperCAmelCase_ : Union[str, Any] = dataset.shuffle(seed=args.seed )
UpperCAmelCase_ : str = os.path.join(A , F"train_pseudo.{args.data_file_extension}" )
if args.data_file_extension == "csv":
dataset.to_csv(A , index=A )
else:
dataset.to_json(A )
def __UpperCAmelCase ( A : Any , A : int , A : Union[str, Any] , A : Dict , **A : Any ) -> Dict:
UpperCAmelCase_ : List[str] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ : Tuple = STModelArguments(model_name_or_path=A )
UpperCAmelCase_ : int = STDataArguments(train_file=A , infer_file=A )
UpperCAmelCase_ : Optional[Any] = STTrainingArguments(output_dir=A )
UpperCAmelCase_ : Optional[int] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(A ).items():
setattr(A , A , A )
for key, value in kwargs.items():
if hasattr(A , A ):
setattr(A , A , A )
# Sanity checks
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : Any = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCAmelCase_ : List[Any] = args.train_file
UpperCAmelCase_ : Optional[int] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCAmelCase_ : List[str] = args.eval_file
for key in data_files:
UpperCAmelCase_ : Dict = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
UpperCAmelCase_ : Any = extension
else:
assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
UpperCAmelCase_ : Any = F"{args.output_dir}/self-train_iter-{{}}".format
UpperCAmelCase_ : Any = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=A )
os.makedirs(A , exist_ok=A )
accelerator.wait_for_everyone()
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : int = False
# Show the progress bar
UpperCAmelCase_ : int = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCAmelCase_ : str = data_dir_format(A )
assert os.path.exists(A )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCAmelCase_ : List[Any] = os.path.join(A , '''stage-1''' )
UpperCAmelCase_ : Dict = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(A , A ):
arguments_dict.update({key: value} )
UpperCAmelCase_ : int = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , A )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCAmelCase_ : Any = os.path.join(A , '''best-checkpoint''' )
UpperCAmelCase_ : Optional[int] = os.path.join(A , '''stage-2''' )
# Update arguments_dict
UpperCAmelCase_ : List[Any] = model_path
UpperCAmelCase_ : Optional[int] = data_files['''train''']
UpperCAmelCase_ : Union[str, Any] = current_output_dir
UpperCAmelCase_ : Optional[int] = os.path.join(A , '''best-checkpoint''' , A )
if os.path.exists(A ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , A , A , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , A )
finetune(**A )
accelerator.wait_for_everyone()
assert os.path.exists(A )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , A )
UpperCAmelCase_ : Dict = iteration
UpperCAmelCase_ : Optional[Any] = data_dir_format(iteration + 1 )
UpperCAmelCase_ : Any = AutoConfig.from_pretrained(os.path.join(A , '''best-checkpoint''' ) )
UpperCAmelCase_ : Optional[Any] = config.idalabel
UpperCAmelCase_ : List[Any] = os.path.join(A , '''eval_results_best-checkpoint.json''' )
UpperCAmelCase_ : Union[str, Any] = os.path.join(A , '''test_results_best-checkpoint.json''' )
assert os.path.exists(A )
with open(A , '''r''' ) as f:
UpperCAmelCase_ : Optional[int] = float(json.load(A )[args.eval_metric] )
UpperCAmelCase_ : List[Any] = os.path.join(A , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(A )
# Loading the dataset from local csv or json files.
UpperCAmelCase_ : str = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
UpperCAmelCase_ : Union[str, Any] = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(A , exist_ok=A )
shutil.copy(A , os.path.join(A , F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(A ):
shutil.copy(A , os.path.join(A , F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(A , A , A , A , A , A )
accelerator.wait_for_everyone()
UpperCAmelCase_ : int = os.path.join(A , F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCAmelCase_ : List[str] = eval_result
if best_iteration is None:
UpperCAmelCase_ : Any = new_iteration
UpperCAmelCase_ : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCAmelCase_ : str = new_iteration
UpperCAmelCase_ : Optional[int] = new_eval_result
UpperCAmelCase_ : Any = 0
else:
if new_eval_result == best_eval_result:
UpperCAmelCase_ : int = new_iteration
UpperCAmelCase_ : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCAmelCase_ : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , A )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"eval_results_iter-{iteration}.json" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , A )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(A , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(A , '''eval_results_best-iteration.json''' ) , )
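# A minimal invocation sketch (file paths and hyperparameters below are
# illustrative placeholders, not values taken from this script):
#
#     selftrain(
#         model_name_or_path="bert-base-uncased",
#         train_file="data/train.csv",
#         infer_file="data/infer.csv",
#         output_dir="outputs/self-training",
#         eval_file="data/eval.csv",
#         evaluation_strategy="epoch",
#         max_selftrain_iterations=3,
#     )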
| 541
| 0
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
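# A self-contained sketch of the character-level round trip the tests above
# rely on (plain dicts for illustration, not the real tokenizer):
if __name__ == "__main__":
    demo_vocab = {c: i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz")}
    demo_ids = [demo_vocab[c] for c in "tester"]  # encode: one id per character
    inv_vocab = {i: c for c, i in demo_vocab.items()}
    assert "".join(inv_vocab[i] for i in demo_ids) == "tester"  # decode round trip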
| 565
|
def encrypt(input_string: str, key: int) -> str:
    """Shuffles the characters of a string by placing each of them in a grid
    (whose height depends on the key) in a zigzag formation and reading it
    left to right."""
    temp_grid = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generates a template based on the key and fills it in with the
    characters of the input string, then reads it in a zigzag formation."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict:
    """Uses decrypt on every possible key."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
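# Round-trip check, verified by hand for the classic three-rail example:
if __name__ == "__main__":
    ciphertext = encrypt("WEAREDISCOVEREDFLEEATONCE", 3)
    assert ciphertext == "WECRLTEERDSOEEFEAOCAIVDEN"
    assert decrypt(ciphertext, 3) == "WEAREDISCOVEREDFLEEATONCE"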
| 565
| 1
|
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci numbers that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(F'{solution() = }')
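# Sanity check: the even Fibonacci numbers not exceeding 100 are 2, 8 and 34,
# so solution(100) should be 2 + 8 + 34 = 44.
if __name__ == "__main__":
    assert solution(100) == 44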
| 686
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
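# Note: SamProcessor.post_process_masks upsamples the low-resolution mask
# logits back to each image's original size, which is why the (1, 3, 5, 5)
# dummy masks above come back with shape (1, 3, 1764, 2646).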
| 134
| 0
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """Solves the fractional knapsack problem greedily by value/weight ratio."""
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
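# Worked example: values [60, 100, 120], weights [10, 20, 30], capacity 50.
# The greedy solution takes items 1 and 2 whole plus 20/30 of item 3:
#     60 + 100 + (50 - 30) * 120 / 30 = 240.0
if __name__ == "__main__":
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0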
| 718
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO ViT weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )

    parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
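# Example invocation (the script name and output path below are illustrative):
#
#     python convert_dino_checkpoint.py --model_name dino_vitb16 \
#         --pytorch_dump_folder_path ./dino_vitb16_converted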
| 405
| 0
|
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of a number, optionally rounded to digit_amount digits."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
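# Note: int() truncates toward zero, so the isolated decimal part keeps the
# sign of the input, e.g. decimal_isolate(-14.789, 3) == -0.789.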
| 496
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class DeiTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])` using the given resampling filter."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale the pixel values of an image by the given scale factor."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
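# A minimal usage sketch for the image processor above (the random input is
# illustrative; any channels-first uint8 array works):
if __name__ == "__main__":
    processor = DeiTImageProcessor()
    image = np.random.randint(0, 256, size=(3, 300, 300), dtype=np.uint8)
    batch = processor(images=image, return_tensors="np")
    # resize to 256x256, then center crop to 224x224 -> (1, 3, 224, 224)
    print(batch["pixel_values"].shape)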
| 283
| 0
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Dict, lowerCamelCase : Dict, lowerCamelCase : Any=13, lowerCamelCase : Union[str, Any]=7, lowerCamelCase : Dict=True, lowerCamelCase : Dict=True, lowerCamelCase : int=True, lowerCamelCase : int=True, lowerCamelCase : Any=99, lowerCamelCase : Union[str, Any]=32, lowerCamelCase : Any=2, lowerCamelCase : Any=4, lowerCamelCase : Any=37, lowerCamelCase : Tuple="gelu", lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : Optional[int]=512, lowerCamelCase : int=16, lowerCamelCase : Union[str, Any]=2, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : int=3, lowerCamelCase : Any=4, lowerCamelCase : str=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = 13
lowercase__ = 7
lowercase__ = True
lowercase__ = True
lowercase__ = True
lowercase__ = True
lowercase__ = 99
lowercase__ = 32
lowercase__ = 2
lowercase__ = 4
lowercase__ = 37
lowercase__ = '''gelu'''
lowercase__ = 0.1
lowercase__ = 0.1
lowercase__ = 512
lowercase__ = 16
lowercase__ = 2
lowercase__ = 0.02
lowercase__ = 3
lowercase__ = 4
lowercase__ = None
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = RoFormerConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=lowerCamelCase, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase__ ( self : Optional[Any], lowerCamelCase : Any, lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : Dict, lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = TFRoFormerModel(config=lowerCamelCase )
lowercase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase__ = [input_ids, input_mask]
lowercase__ = model(lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = True
lowercase__ = TFRoFormerForCausalLM(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ), [self.batch_size, self.seq_length, self.vocab_size] )
def lowercase__ ( self : Tuple, lowerCamelCase : Any, lowerCamelCase : Any, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Tuple, lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = TFRoFormerForMaskedLM(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase__ ( self : Any, lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : Any, lowerCamelCase : int, lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFRoFormerForSequenceClassification(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict, lowerCamelCase : Tuple, lowerCamelCase : List[str], lowerCamelCase : Any, lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : int, lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = TFRoFormerForMultipleChoice(config=lowerCamelCase )
lowercase__ = tf.tile(tf.expand_dims(lowerCamelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = tf.tile(tf.expand_dims(lowerCamelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = tf.tile(tf.expand_dims(lowerCamelCase, 1 ), (1, self.num_choices, 1) )
lowercase__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : int, lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : Optional[Any], lowerCamelCase : Dict, lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = TFRoFormerForTokenClassification(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase__ ( self : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : Dict, lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : List[str] ):
'''simple docstring'''
lowercase__ = TFRoFormerForQuestionAnswering(config=lowerCamelCase )
lowercase__ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2, 12, 16, 64 - (batch, heads, sequence length, head dimension)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        expected_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
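# A quick numeric intuition for the rotary test above (an added note, not part of the
# original test file): apply_rotary_position_embeddings rotates each (even, odd)
# feature pair of the query/key by a position-dependent angle t, computing
# (x1*cos(t) - x2*sin(t), x2*cos(t) + x1*sin(t)); position 0 is therefore left
# unchanged in the expected tensors, since cos(0) = 1 and sin(0) = 0.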
| 701
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
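# How the lazy module above behaves in practice (added note): importing the package
# installs only the `_LazyModule` stub, and a later attribute access such as
#     from transformers import UniSpeechModel
# triggers the actual import of `modeling_unispeech` on first use, keeping the
# top-level `import transformers` cheap when torch models are never touched.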
| 671
| 0
|
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result
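    # Shape of the raw TabFact `table_text` field consumed above (illustrative values):
    #     "city#population\nparis#2.1m\nlyon#0.5m"
    # the first '#'-separated row becomes the DataFrame header, the remaining rows the records.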
    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
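# Example invocation (illustrative; the flags map to the dataclass fields above and
# the checkpoint name is an assumption, not something this script pins):
#   python run_tabfact.py \
#     --model_name_or_path microsoft/tapex-base \
#     --do_train --do_eval \
#     --output_dir ./tapex-tabfact-output \
#     --per_device_train_batch_size 8 --num_train_epochs 3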
| 609
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class lowercase(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
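# Minimal usage sketch (illustrative; assumes a channel-first uint8 array as input):
#     import numpy as np
#     processor = lowercase()
#     batch = processor(np.zeros((3, 256, 256), dtype=np.uint8), return_tensors="np")
#     batch["pixel_values"][0].shape  # expected (3, 224, 224) after resize + center crop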
| 609
| 1
|
"""simple docstring"""
def interpolation_search(sorted_collection, item):
    """Searches `item` in an ascending `sorted_collection`; returns its index, or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
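# Worked example of the probe-position formula above (illustrative):
# for sorted_collection = [10, 30, 40, 45, 50, 66, 77, 93] and item = 67,
#     point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 399 // 83 = 4,
# so the first probe lands on index 4 (value 50) rather than the midpoint,
# which is the whole advantage of interpolation over plain binary search.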
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure implementation of interpolation search in Python by recursion."""
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    """Checks that `collection` is ascending sorted; raises ValueError otherwise."""
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
| 707
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 228
| 0
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0

    def is_empty(self) -> bool:
        return self.head == self.tail

    def push(self, data: Any) -> None:
        self.data.append(data)
        self.tail = self.tail + 1

    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret

    def count(self) -> int:
        return self.tail - self.head

    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])


class MyNode:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1

    def get_data(self) -> Any:
        return self.data

    def get_left(self) -> MyNode | None:
        return self.left

    def get_right(self) -> MyNode | None:
        return self.right

    def get_height(self) -> int:
        return self.height

    def set_data(self, data: Any) -> None:
        self.data = data

    def set_left(self, node: MyNode | None) -> None:
        self.left = node

    def set_right(self, node: MyNode | None) -> None:
        self.right = node

    def set_height(self, height: int) -> None:
        self.height = height


def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b


def right_rotation(node: MyNode) -> MyNode:
    # lifts the left child to the root of this subtree
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    # a mirror symmetry rotation of right_rotation: lifts the right child
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
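# Worked example for the double rotations above (added note): inserting 3, 1, 2 in
# that order makes 2 the left-right grandchild of 3; a single right_rotation of 3
# cannot fix that, so insert_node calls lr_rotation: left_rotation on the left
# child (1) first lines the nodes up into a left-left chain, and right_rotation on
# the root (3) then yields the balanced tree with 2 at the root. rl_rotation is
# the mirror-image fix for a right-left imbalance.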
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    return node


def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()


def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))

    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root


class AVLtree:
    def __init__(self) -> None:
        self.root: MyNode | None = None

    def get_height(self) -> int:
        return get_height(self.root)

    def insert(self, data: Any) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)

    def del_node(self, data: Any) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)

    def __str__(self) -> str:  # a level traversal gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output


def _test() -> None:
    import doctest

    doctest.testmod()


if __name__ == "__main__":
    _test()
    t = AVLtree()
    lst = list(range(10))
    random.shuffle(lst)
    for i in lst:
        t.insert(i)
    print(str(t))
    random.shuffle(lst)
    for i in lst:
        t.del_node(i)
    print(str(t))
| 343
|
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
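# Usage sketch (illustrative; the feature spec and path are made-up examples):
#     features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#     dataset = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=100)
#     len(dataset)  # -> 100, backed by the Arrow file written above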
| 343
| 1
|
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
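# Usage sketch (illustrative):
#     config = BertGenerationConfig(vocab_size=50358, hidden_size=1024)
#     config.num_hidden_layers  # -> 24; every __init__ argument becomes a config attribute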
| 410
|
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Pass two points to get the vector from them in the form (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Get the cross product of the two vectors AB and AC."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check if vector is equal to (0, 0, 0) up to the given rounding accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear exactly when the cross product AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
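# Quick checks (illustrative):
#     are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))   # -> True, AB x AC == (0, 0, 0)
#     are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))   # -> False, cross product is (0, 0, 1)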
| 410
| 1
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
snake_case_ = MODEL_FOR_MASKED_LM_MAPPING
snake_case_ = TF_MODEL_FOR_MASKED_LM_MAPPING
def lowercase_ ( self ) -> str:
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='tf' )
__lowerCamelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is grouped', 'score': 2.1e-05, 'token': 38_015, 'token_str': ' grouped'},
{'sequence': 'My name is accuser', 'score': 2.1e-05, 'token': 25_506, 'token_str': ' accuser'},
] , )
__lowerCamelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{
'sequence': 'The largest city in France is grouped',
'score': 2.1e-05,
'token': 38_015,
'token_str': ' grouped',
},
{
'sequence': 'The largest city in France is accuser',
'score': 2.1e-05,
'token': 25_506,
'token_str': ' accuser',
},
] , )
__lowerCamelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Patrick', 'score': 2e-05, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 1.9e-05, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , top_k=2 , framework='pt' )
__lowerCamelCase = unmasker('My name is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is Maul', 'score': 2.2e-05, 'token': 35_676, 'token_str': ' Maul'},
{'sequence': 'My name isELS', 'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS'},
] , )
__lowerCamelCase = unmasker('The largest city in France is <mask>' )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{
'sequence': 'The largest city in France is Maul',
'score': 2.2e-05,
'token': 35_676,
'token_str': ' Maul',
},
{'sequence': 'The largest city in France isELS', 'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS'},
] , )
__lowerCamelCase = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
{'sequence': 'My name is Patrick', 'score': 2.1e-05, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Te', 'score': 2e-05, 'token': 2_941, 'token_str': ' Te'},
{'sequence': 'My name is Clara', 'score': 2e-05, 'token': 13_606, 'token_str': ' Clara'},
] , )
__lowerCamelCase = unmasker('My name is <mask> <mask>' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=6 ) , [
[
{
'score': 2.2e-05,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is Maul<mask></s>',
},
{'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name isELS<mask></s>'},
],
[
{
'score': 2.2e-05,
'token': 35_676,
'token_str': ' Maul',
'sequence': '<s>My name is<mask> Maul</s>',
},
{'score': 2.2e-05, 'token': 16_416, 'token_str': 'ELS', 'sequence': '<s>My name is<mask>ELS</s>'},
],
] , )
@require_torch_gpu
def lowercase_ ( self ) -> int:
'''simple docstring'''
__lowerCamelCase = pipeline('fill-mask' , model='hf-internal-testing/tiny-random-distilbert' , device=0 , framework='pt' )
# convert model to fp16
pipe.model.half()
__lowerCamelCase = pipe('Paris is the [MASK] of France.' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
@slow
@require_torch
    def test_large_model_pt( self ) -> str:
        '''simple docstring'''
        unmasker = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='pt' )
        self.run_large_test(unmasker )
@slow
@require_tf
    def test_large_model_tf( self ) -> str:
        '''simple docstring'''
        unmasker = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 , framework='tf' )
        self.run_large_test(unmasker )
    def run_large_test( self , unmasker ) -> Optional[Any]:
        '''simple docstring'''
        outputs = unmasker('My name is <mask>' )
        self.assertEqual(
            nested_simplify(outputs ) , [
{'sequence': 'My name is John', 'score': 0.0_08, 'token': 610, 'token_str': ' John'},
{'sequence': 'My name is Chris', 'score': 0.0_07, 'token': 1_573, 'token_str': ' Chris'},
] , )
        outputs = unmasker('The largest city in France is <mask>' )
        self.assertEqual(
            nested_simplify(outputs ) , [
{
'sequence': 'The largest city in France is Paris',
'score': 0.2_51,
'token': 2_201,
'token_str': ' Paris',
},
{
'sequence': 'The largest city in France is Lyon',
'score': 0.2_14,
'token': 12_790,
'token_str': ' Lyon',
},
] , )
        outputs = unmasker('My name is <mask>' , targets=[' Patrick', ' Clara', ' Teven'] , top_k=3 )
        self.assertEqual(
            nested_simplify(outputs ) , [
{'sequence': 'My name is Patrick', 'score': 0.0_05, 'token': 3_499, 'token_str': ' Patrick'},
{'sequence': 'My name is Clara', 'score': 0.0_00, 'token': 13_606, 'token_str': ' Clara'},
{'sequence': 'My name is Te', 'score': 0.0_00, 'token': 2_941, 'token_str': ' Te'},
] , )
@require_torch
    def test_model_no_pad_pt( self ) -> int:
        '''simple docstring'''
        unmasker = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='pt' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
@require_tf
    def test_model_no_pad_tf( self ) -> str:
        '''simple docstring'''
        unmasker = pipeline(task='fill-mask' , model='sshleifer/tiny-distilroberta-base' , framework='tf' )
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker , [] )
    def get_test_pipeline( self , model , tokenizer , processor ) -> List[str]:
        '''simple docstring'''
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest('The provided tokenizer has no mask token, (probably reformer or wav2vec2)' )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        examples = [
            f"""This is another {tokenizer.mask_token} test""",
        ]
        return fill_masker, examples
    def run_pipeline_test( self , fill_masker , examples ) -> Optional[int]:
        '''simple docstring'''
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token}""" , )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        outputs = fill_masker([f"""This is a {tokenizer.mask_token}"""] )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        outputs = fill_masker([f"""This is a {tokenizer.mask_token}""", f"""Another {tokenizer.mask_token} great test."""] )
        self.assertEqual(
            outputs , [
                [
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                ],
                [
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                ],
            ] , )
        with self.assertRaises(ValueError ):
            fill_masker([None] )
        # No mask_token is not supported
        with self.assertRaises(ValueError ):
            fill_masker('This is' )
        self.run_test_top_k(model , tokenizer )
        self.run_test_targets(model , tokenizer )
        self.run_test_top_k_targets(model , tokenizer )
        self.fill_mask_with_duplicate_targets_and_top_k(model , tokenizer )
        self.fill_mask_with_multiple_masks(model , tokenizer )
    def run_test_targets( self , model , tokenizer ) -> Dict:
        '''simple docstring'''
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs} , set(processed_targets ) )
        # Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=targets )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['token'] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['token_str'] for el in outputs} , set(processed_targets ) )
        # Score equivalence
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=targets )
        tokens = [top_mask['token_str'] for top_mask in outputs]
        scores = [top_mask['score'] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens ) == set(targets ):
            unmasked_targets = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=tokens )
            target_scores = [top_mask['score'] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
        # Raises with invalid targets
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError ):
                outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[''] )
        with self.assertRaises(ValueError ):
            outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets='' )
    def run_test_top_k( self , model , tokenizer ) -> Optional[int]:
        '''simple docstring'''
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            outputs , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs2 , [
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def run_test_top_k_targets( self , model , tokenizer ) -> int:
        '''simple docstring'''
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=targets )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el['token_str'] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2 ).issubset(targets ):
            outputs2 = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=targets2 )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ) -> List[Any]:
        '''simple docstring'''
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # unique predictions than unique targets
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ) -> Union[str, Any]:
        '''simple docstring'''
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                ],
                [
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                ],
                [
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                    {'sequence': ANY(str ), 'score': ANY(float ), 'token': ANY(int ), 'token_str': ANY(str )},
                ],
            ] , )
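# A minimal usage sketch of the pipeline exercised above (illustrative; exact
# scores depend on the checkpoint and are not guaranteed):
#     unmasker = pipeline(task='fill-mask' , model='distilroberta-base' , top_k=2 )
#     unmasker('Paris is the <mask> of France.' )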
| 469
|
def harmonic_series( n_term: str ) -> list:
    """simple docstring"""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term ) ):
        series.append(f"""1/{temp + 1}""" if series else '1' )
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
print(harmonic_series(nth_term))
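# A minimal usage sketch (illustrative addition, not part of the original
# script): each term is rendered as a string fraction, with the first term "1".
assert harmonic_series("5") == ["1", "1/2", "1/3", "1/4", "1/5"]
assert harmonic_series("") == []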
| 469
| 1
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class A__ ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
    def get_dummy_inputs( self: int , seed: Any=0) -> Union[str, Any]:
        """simple docstring"""
        generator = np.random.RandomState(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = self.get_dummy_inputs()
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : List[str] = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Dict = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_SCREAMING_SNAKE_CASE)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[int] = self.get_dummy_inputs()
__lowerCAmelCase : str = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : str = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: str) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.get_dummy_inputs()
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Any = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.get_dummy_inputs()
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Tuple = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : int = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Optional[Any] = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : List[Any] = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
__lowerCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE).images
__lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowerCAmelCase : Optional[Any] = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def _SCREAMING_SNAKE_CASE ( self: Any) -> str:
"""simple docstring"""
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : str = self.get_dummy_inputs()
__lowerCAmelCase : List[str] = 3 * [inputs["prompt"]]
# forward
__lowerCAmelCase : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs()
__lowerCAmelCase : Union[str, Any] = 3 * [inputs.pop("prompt")]
__lowerCAmelCase : Union[str, Any] = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , )
__lowerCAmelCase : Dict = text_inputs["input_ids"]
__lowerCAmelCase : str = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0]
__lowerCAmelCase : Union[str, Any] = prompt_embeds
# forward
__lowerCAmelCase : Tuple = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
__lowerCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider")
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Any = self.get_dummy_inputs()
__lowerCAmelCase : Optional[int] = 3 * ["this is a negative prompt"]
__lowerCAmelCase : Union[str, Any] = negative_prompt
__lowerCAmelCase : Union[str, Any] = 3 * [inputs["prompt"]]
# forward
__lowerCAmelCase : List[Any] = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : List[Any] = output.images[0, -3:, -3:, -1]
__lowerCAmelCase : Any = self.get_dummy_inputs()
__lowerCAmelCase : List[Any] = 3 * [inputs.pop("prompt")]
__lowerCAmelCase : Dict = []
for p in [prompt, negative_prompt]:
__lowerCAmelCase : Optional[Any] = pipe.tokenizer(
_SCREAMING_SNAKE_CASE , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_SCREAMING_SNAKE_CASE , return_tensors="np" , )
__lowerCAmelCase : Any = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa))[0])
__lowerCAmelCase , __lowerCAmelCase : List[str] = embeds
# forward
__lowerCAmelCase : int = pipe(**_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten()).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class A__ ( unittest.TestCase ):
'''simple docstring'''
@property
    def gpu_provider( self: List[str]) -> int:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self: Union[str, Any]) -> Union[str, Any]:
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger"
np.random.seed(0)
__lowerCAmelCase : str = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np")
__lowerCAmelCase : Union[str, Any] = output.images
__lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase : Dict = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self: str) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Tuple = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx")
__lowerCAmelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Tuple = "open neural network exchange"
__lowerCAmelCase : Union[str, Any] = np.random.RandomState(0)
__lowerCAmelCase : List[str] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type="np")
__lowerCAmelCase : Tuple = output.images
__lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase : Optional[Any] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str:
"""simple docstring"""
__lowerCAmelCase : Tuple = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx")
__lowerCAmelCase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE)
__lowerCAmelCase : Dict = "open neural network exchange"
__lowerCAmelCase : Any = np.random.RandomState(0)
__lowerCAmelCase : int = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_SCREAMING_SNAKE_CASE , output_type="np")
__lowerCAmelCase : Optional[Any] = output.images
__lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCAmelCase : List[Any] = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self: str) -> Optional[int]:
"""simple docstring"""
        number_of_steps = 0
        def test_callback_fn(step: int , timestep: int , latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
        test_callback_fn.has_been_called = False
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None)
        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt , num_inference_steps=5 , guidance_scale=7.5 , generator=generator , callback=test_callback_fn , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def _SCREAMING_SNAKE_CASE ( self: str) -> Tuple:
"""simple docstring"""
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        assert isinstance(pipe , OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None
        image = pipe("example prompt" , num_inference_steps=2).images[0]
        assert image is not None
        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)
        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt" , num_inference_steps=2).images[0]
assert image is not None
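# A minimal usage sketch (illustrative; the tiny test checkpoint mirrors the
# `hub_checkpoint` attribute above, and the provider choice is an assumption):
#     pipe = OnnxStableDiffusionPipeline.from_pretrained(
#         'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' , provider='CPUExecutionProvider' )
#     image = pipe('A painting of a squirrel eating a burger' , num_inference_steps=2 , output_type='np' ).images[0]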
| 615
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase ):
'''simple docstring'''
    def test_base_case( self: Optional[int]) -> List[Any]:
        """simple docstring"""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 0)
    def test_easy_case( self: Any) -> int:
        """simple docstring"""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 5)
    def test_knapsack( self: Optional[int]) -> Optional[Any]:
        """simple docstring"""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap , w , val , c) , 220)
if __name__ == "__main__":
unittest.main()
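# A minimal illustration (assumption: the solver under test has the signature
# knapsack(capacity, weights, values, counter)): with capacity 50 and items
# valued (60, 100, 120) at weights (10, 20, 30), the optimal 0/1 selection is
# the last two items for a total value of 220, as asserted above.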
| 615
| 1
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class __A ( SchedulerCommonTest ):
'''simple docstring'''
__lowerCamelCase : Dict = (DPMSolverSDEScheduler,)
__lowerCamelCase : Union[str, Any] = 10
    def get_scheduler_config(self , **A ) -> List[str]:
        """simple docstring"""
        config = {
            '''num_train_timesteps''': 1_100,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''noise_sampler_seed''': 0,
        }
        config.update(**A )
        return config
    def test_timesteps(self ) -> Tuple:
"""simple docstring"""
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A )
    def test_betas(self ) -> Optional[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=A , beta_end=A )
    def test_schedules(self ) -> Union[str, Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A )
    def test_prediction_type(self ) -> int:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
    def test_full_loop_no_noise(self ) -> List[str]:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_with_v_prediction(self ) -> Optional[Any]:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
    def test_full_loop_device(self ) -> List[str]:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_device_karras_sigmas(self ) -> Tuple:
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
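# A minimal usage sketch (illustrative; mirrors the config built by
# get_scheduler_config above):
#     scheduler = DPMSolverSDEScheduler(num_train_timesteps=1_100 , beta_schedule='linear' , noise_sampler_seed=0 )
#     scheduler.set_timesteps(10 )
#     # scheduler.timesteps now holds the 10 inference timesteps used in the tests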
| 11
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ) -> Any:
    """simple docstring"""
    return getitem, k
def _set( k , v ) -> Union[str, Any]:
    """simple docstring"""
    return setitem, k, v
def _del( k ) -> Any:
    """simple docstring"""
    return delitem, k
def _run_operation( obj , fun , *args ) -> Optional[int]:
    """simple docstring"""
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def test_hash_map_is_the_same_as_dict( operations ) -> Optional[Any]:
    """simple docstring"""
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my_exc ) == str(py_exc )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods( ) -> Any:
    """simple docstring"""
    def is_public(name: str ) -> bool:
        return not name.startswith('''_''' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
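# A minimal illustration (not part of the original test module; assumes HashMap
# follows the dict-style protocol exercised above): each operation tuple pairs
# an operator-module callable with its arguments.
_demo = HashMap(initial_block_size=4 )
assert _run_operation(_demo , setitem , "key_a" , "val_a" ) == (None, None)
assert _run_operation(_demo , getitem , "key_a" ) == ("val_a", None)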
| 315
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline( DiffusionPipeline ):
    def __init__( self : str , transformer : Transformer2DModel , vae : AutoencoderKL , scheduler : KarrasDiffusionSchedulers , id2label : Optional[Dict[int, str]] = None , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(transformer=transformer , vae=vae , scheduler=scheduler )
        # create an imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(""",""" ):
                    self.labels[label] = int(key )
        self.labels = dict(sorted(self.labels.items() ) )
    def get_label_ids( self : Union[str, Any] , label : Tuple ):
        '''simple docstring'''
        if not isinstance(label , list ):
            label = list(label )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    F'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__( self : List[Any] , class_labels : List[int] , guidance_scale : float = 4.0 , generator = None , num_inference_steps : int = 50 , output_type : str = "pil" , return_dict : bool = True , ):
        '''simple docstring'''
        batch_size = len(class_labels )
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=generator , device=self.device , dtype=self.transformer.dtype , )
        latent_model_input = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels , device=self.device ).reshape(-1 )
        class_null = torch.tensor([1000] * batch_size , device=self.device )
        class_labels_input = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input ) // 2]
                latent_model_input = torch.cat([half, half] , dim=0 )
            latent_model_input = self.scheduler.scale_model_input(latent_model_input , t )
            timesteps = t
            if not torch.is_tensor(timesteps ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == """mps"""
                if isinstance(timesteps , float ):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps] , dtype=dtype , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                timesteps = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input , timestep=timesteps , class_labels=class_labels_input ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps , len(eps ) // 2 , dim=0 )
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps] , dim=0 )
                noise_pred = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred , latent_channels , dim=1 )
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output , t , latent_model_input ).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2 , dim=0 )
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents ).sample
        samples = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples )
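# A minimal usage sketch (illustrative; the checkpoint id is an assumption and
# inference requires the full diffusers weights):
#     pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
#     class_ids = pipe.get_label_ids(['white shark'] )
#     image = pipe(class_labels=class_ids , num_inference_steps=25 ).images[0]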
| 720
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def radix_sort( list_of_ints : list[int] ) ->list[int]:
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
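# A minimal usage sketch (illustrative addition, not in the original module):
# each pass buckets keys by one base-10 digit, so the result is fully sorted.
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66] ) == [2, 24, 45, 66, 75, 90, 170, 802]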
| 498
| 0
|
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs( token , num_runs=7 ):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'''Bearer {token}'''}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
    result = requests.get(url , headers=headers ).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs( token ):
    workflow_runs = get_daily_ci_runs(token )
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id
def get_last_daily_ci_artifacts( artifact_names , output_dir , token ):
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports( artifact_names , output_dir , token ):
    get_last_daily_ci_artifacts(artifact_names , output_dir , token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir , f'''{artifact_name}.zip''' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8' )
    return results
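# A minimal usage sketch (illustrative; the artifact name and token variable
# are assumptions):
#     reports = get_last_daily_ci_reports(
#         artifact_names=['single-gpu_run_all_tests_gpu_test_reports'] ,
#         output_dir='previous_ci' ,
#         token=os.environ.get('ACCESS_TOKEN') , )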
| 447
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> Union[str, Any]:
        """simple docstring"""
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer( self , **kwargs ) -> List[str]:
        """simple docstring"""
        return AutoTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> str:
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default( self ) -> Optional[int]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        processor.save_pretrained(self.tmpdirname )
        processor = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
    def test_save_load_pretrained_additional_features( self ) -> int:
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    def test_speaker_embeddings( self ) -> Dict:
        """simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(tmpfilename , **voice_preset )
        inputs = processor(text=self.input_string , voice_preset=tmpfilename )
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset )
    def test_tokenizer( self ) -> List[Any]:
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer )
        encoded_processor = processor(text=self.input_string )
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
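# A minimal usage sketch (illustrative; mirrors the checkpoint and preset
# constants used in setUp above):
#     processor = BarkProcessor.from_pretrained('ylacombe/bark-small' )
#     inputs = processor(text='This is a test string' , voice_preset='en_speaker_1' )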
| 447
| 1
|
import math
def jump_search( arr , x ):
    '''simple docstring'''
    n = len(arr )
    step = int(math.floor(math.sqrt(n ) ) )
    prev = 0
    while arr[min(step , n ) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n ) ) )
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step , n ):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
_UpperCAmelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
_UpperCAmelCase : int = [int(item) for item in user_input.split(""",""")]
_UpperCAmelCase : Union[str, Any] = int(input("""Enter the number to be searched:\n"""))
_UpperCAmelCase : Optional[Any] = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(F'''Number {x} is at index {res}''')
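# A minimal usage sketch (illustrative, not part of the original script): jump
# search assumes a sorted array and probes in sqrt(n)-sized blocks.
assert jump_search([0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] , 55 ) == 9
assert jump_search([0, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89] , 4 ) == -1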
| 721
|
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
_UpperCAmelCase : Any = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor( DeformableDetrImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use DeformableDetrImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 108
| 0
|
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling( num: int , den: int )-> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list( digit_len: int )-> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(F'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions
def solution( n: int = 2 )-> int:
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
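# Sanity check (illustrative, not in the original script): the four non-trivial
# two-digit digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98, and
# the product of their denominators over numerators reduces to 100.
assert fraction_list(2 ) == ['16/64', '19/95', '26/65', '49/98']
assert solution() == 100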
| 551
|
def topological_sort( graph: dict )-> None:
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print("Cycle exists" )
    else:
        print(topo )
# Adjacency List of Graph
__UpperCamelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
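# A minimal extra check (illustrative, not in the original script): a straight
# chain is emitted in index order by Kahn's algorithm.
topological_sort({0: [1], 1: [2], 2: []} )  # prints [0, 1, 2]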
| 551
| 1
|
"""simple docstring"""
from __future__ import annotations
def fractional_knapsack( value : list[int] , weight : list[int] , capacity : int ) -> tuple[float, list[float]]:
    '''simple docstring'''
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value: float = 0
    fractions: list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
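# A minimal usage sketch (illustrative, not in the original module): items are
# taken greedily by value/weight ratio, splitting the last item that fits.
value, weight, capacity = [60, 100, 120], [10, 20, 30], 50
max_value, fractions = fractional_knapsack(value , weight , capacity )
assert max_value == 240.0
assert fractions == [1, 1, 2 / 3]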
| 711
|
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase = """\
Text data.
Second line of data."""
lowercase = """file"""
@pytest.fixture(scope="session" )
def zstd_path( tmp_path_factory ) -> str:
    '''simple docstring'''
    path = tmp_path_factory.mktemp("data" ) / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT , "utf-8" )
    with zstd.open(path , "wb" ) as f:
        f.write(data )
    return path
@pytest.fixture
def tmpfs_file( tmpfs ) -> int:
    '''simple docstring'''
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , "w" ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def test_cached_path_extract( compression_format , gz_file , xz_file , zstd_path , tmp_path , text_file ) -> List[str]:
    '''simple docstring'''
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def test_extracted_datasets_path( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ) -> Dict:
    '''simple docstring'''
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , custom_extracted_dir )
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def test_cached_path_local( text_file ) -> Optional[Any]:
    '''simple docstring'''
    # absolute path
    text_file = str(Path(text_file ).resolve() )
    assert cached_path(text_file ) == text_file
    # relative path
    text_file = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file ) == text_file
def test_cached_path_missing_local( tmp_path ) -> Optional[Any]:
    '''simple docstring'''
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt" )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def test_get_from_cache_tmpfs( tmpfs_file ) -> Optional[int]:
    '''simple docstring'''
    output_file = get_from_cache(F"""tmp://{tmpfs_file}""" )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( ) -> Dict:
'''simple docstring'''
with pytest.raises(_UpperCAmelCase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
snake_case__ : int = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
http_get("https://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[int] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
ftp_get("ftp://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , _UpperCAmelCase )
def A__ ( _UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : Dict = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(_UpperCAmelCase ):
fsspec_get("s3://huggingface.co" , temp_file=_UpperCAmelCase )
with pytest.raises(_UpperCAmelCase ):
fsspec_head("s3://huggingface.co" )
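# A small usage sketch of the API exercised above (not part of the suite; the
# local file name and the import path are assumptions that may vary across
# `datasets` versions): with `extract_compressed_file=True`, `cached_path`
# returns the path of a decompressed copy kept inside the datasets cache.
#
#     from datasets import DownloadConfig
#     from datasets.utils.file_utils import cached_path
#
#     extracted = cached_path("data.txt.gz", download_config=DownloadConfig(extract_compressed_file=True))
#     print(extracted)  # path of the decompressed copy in the cache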
import unittest

import torch

from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow


torch.backends.cuda.matmul.allow_tf32 = False


class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
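# Standalone sketch of the property the test above leans on: with the same
# (beta_start, beta_end, schedule) settings, DDPMScheduler.add_noise and
# DDIMScheduler.add_noise yield identical noisy samples, since both compute
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.
if __name__ == "__main__":
    set_seed(0)
    ddpm = DDPMScheduler(num_train_timesteps=1000)
    ddim = DDIMScheduler(num_train_timesteps=1000)
    x0 = torch.randn(1, 3, 8, 8)
    eps = torch.randn_like(x0)
    t = torch.tensor([10])
    assert torch.allclose(ddpm.add_noise(x0, eps, t), ddim.add_noise(x0, eps, t), atol=1e-5)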
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree and iterates through each branch using DFS: at
    every index the element is first excluded, then included. Recursion stops at
    the end of the sequence, where the current subsequence is printed.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
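# Expected order for a two-element input (a sketch of the trace): the exclude
# branch is explored before the include branch at every index, so
# generate_all_subsequences([1, 2]) prints
#   []
#   [2]
#   [1]
#   [1, 2]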
import unittest

from knapsack import knapsack as k


class TestClass(unittest.TestCase):
    def test_base_case(self):
        # zero capacity (and a zero-weight, zero-value item) yields no profit
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        # capacity 3 fits the items weighing 2 and 1, worth 2 + 3 = 5
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        # the classic example: pick the 100- and 120-value items for 220 total
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the table of content of the model documentation by removing
    duplicates and sorting models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    # Clean each modality sub-section in turn
    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
""" Testing suite for the PyTorch YOLOS model. """


import inspect
import unittest

from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import YolosForObjectDetection, YolosModel
    from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])

        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values=pixel_values)
        result = model(pixel_values)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

        result = model(pixel_values=pixel_values, labels=labels)

        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class YolosModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False

    # special case for head models
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                labels = []
                for i in range(self.model_tester.batch_size):
                    target = {}
                    target["class_labels"] = torch.ones(
                        size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
                    )
                    target["boxes"] = torch.ones(
                        self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
                    )
                    labels.append(target)
                inputs_dict["labels"] = labels

        return inputs_dict

    def setUp(self):
        self.model_tester = YolosModelTester(self)
        self.config_tester = ConfigTester(self, config_class=YolosConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_inputs_embeds(self):
        # YOLOS does not use inputs_embeds
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        # in YOLOS, the seq_len is different
        seq_len = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            added_hidden_states = 1
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, seq_len, seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = getattr(
                self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
            )
            self.assertEqual(len(hidden_states), expected_num_layers)

            # YOLOS has a different seq_length
            seq_length = self.model_tester.expected_seq_len

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_object_detection(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = YolosModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None

    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)

        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))

        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)

        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
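# A quick inference sketch against the same checkpoint the integration test uses
# (downloads hustvl/yolos-small; the "object-detection" task name matches
# `pipeline_model_mapping` above):
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection", model="hustvl/yolos-small")
    for pred in detector("./tests/fixtures/tests_samples/COCO/000000039769.png"):
        print(pred["label"], round(pred["score"], 3), pred["box"])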
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
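# Usage sketch (from user code, not from inside this package): with the lazy
# module in place, the classes listed in `_import_structure` are importable from
# the top-level package, and `modeling_luke` is only loaded on first access.
#
#     from transformers import LukeConfig, LukeModel
#
#     model = LukeModel(LukeConfig())  # triggers the actual modeling import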
""" Testing suite for the PyTorch MRA model. """


import unittest

from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_torch_available():
    import torch

    from transformers import (
        MraForMaskedLM,
        MraForMultipleChoice,
        MraForQuestionAnswering,
        MraForSequenceClassification,
        MraForTokenClassification,
        MraModel,
    )
    from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST


class MraModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_parallelizable_model_classes = ()

    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return


@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)

        with torch.no_grad():
            output = model(input_ids)[0]

        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import re


def is_sri_lankan_phone_number(phone: str) -> bool:
    """
    Determine whether the given string is a valid Sri Lankan mobile phone number.
    Accepted prefixes are 0, 94, +94 or 0094, followed by 7x (x in 0-2, 4-8) and
    seven digits, optionally separated by a space or a hyphen.
    """
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$"
    )
    return bool(re.search(pattern, phone))


if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
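# Quick checks (a sketch): the pattern accepts the usual prefixes and at most one
# separator before the seven-digit subscriber part.
if __name__ == "__main__":
    assert is_sri_lankan_phone_number("+94773283048")
    assert is_sri_lankan_phone_number("0718382399")
    assert not is_sri_lankan_phone_number("0793283048")     # 79x is not a valid mobile prefix
    assert not is_sri_lankan_phone_number("0712345678901")  # too many digits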
import warnings
from typing import Dict

import numpy as np

from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n        return_all_scores (`bool`, *optional*, defaults to `False`):\n            Whether to return all prediction scores or just the one of the predicted class.\n        function_to_apply (`str`, *optional*, defaults to `\"default\"`):\n            The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n            - `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n              has several labels, will apply the softmax function on the output.\n            - `\"sigmoid\"`: Applies the sigmoid function on the output.\n            - `\"softmax\"`: Applies the softmax function on the output.\n            - `\"none\"`: Does not apply any function on the output.\n    ",
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
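# A minimal usage sketch of this pipeline (downloads a checkpoint; the model name
# is illustrative -- any sequence-classification checkpoint works):
#
#     from transformers import pipeline
#
#     classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
#     print(classifier("This is great!"))              # [{'label': 'POSITIVE', 'score': ...}]
#     print(classifier("This is great!", top_k=None))  # one dict per label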
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE ) -> bool:
snake_case_ = [int(_SCREAMING_SNAKE_CASE ) for i in ip_va_address.split(""".""" ) if i.isdigit()]
return len(_SCREAMING_SNAKE_CASE ) == 4 and all(0 <= int(_SCREAMING_SNAKE_CASE ) <= 254 for octet in octets )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Any = input().strip()
__SCREAMING_SNAKE_CASE : int = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
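# A few self-checks for the validator (a sketch):
if __name__ == "__main__":
    assert is_ip_v4_address_valid("192.168.0.23")
    assert is_ip_v4_address_valid("255.255.255.255")   # broadcast address is valid
    assert not is_ip_v4_address_valid("192.256.15.8")  # 256 is out of range
    assert not is_ip_v4_address_valid("12.34.56")      # only three octets
    assert not is_ip_v4_address_valid("1.2.-3.4")      # "-3" is not a digit string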
"""simple docstring"""
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
snake_case_ = 0
snake_case_ = 0
snake_case_ = knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
snake_case_ = values[index] + knapsack(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
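# Worked example (a sketch): with capacity 50 and items (weight, value) =
# (10, 60), (20, 100), (30, 120), the optimum takes the last two items for 220.
if __name__ == "__main__":
    weights = [10, 20, 30]
    values = [60, 100, 120]
    print(knapsack(weights, values, len(weights), 50, 0))  # 220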
import argparse
import json
import os

import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile

from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
    rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int


def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """
    Post renaming of basic JAX keys to pytorch.
    """
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor


def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content


def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)


def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )  # len(sharded_state_dicts):05d}
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )


def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Apply the electrical impedance formula |Z|^2 = R^2 + X^2 to any two given
    values, and return a name/value pair for the one that was passed as 0.
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
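# Worked examples (a sketch): a 3-4-5 right triangle of resistance and reactance.
if __name__ == "__main__":
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}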
"""simple docstring"""
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : Any = {}
def A_ ( self , lowercase ):
if vertex not in self.adjacency:
_lowerCamelCase : Optional[int] = {}
self.num_vertices += 1
def A_ ( self , lowercase , lowercase , lowercase ):
self.add_vertex(lowercase )
self.add_vertex(lowercase )
if head == tail:
return
_lowerCamelCase : Tuple = weight
_lowerCamelCase : Tuple = weight
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.get_edges()
for edge in edges:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = edge
edges.remove((tail, head, weight) )
for i in range(len(lowercase ) ):
_lowerCamelCase : List[str] = list(edges[i] )
edges.sort(key=lambda lowercase : e[2] )
for i in range(len(lowercase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
_lowerCamelCase : int = edges[i][2] + 1
for edge in edges:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = edge
_lowerCamelCase : List[str] = weight
_lowerCamelCase : Tuple = weight
def __str__( self ):
_lowerCamelCase : Union[str, Any] = ''
for tail in self.adjacency:
for head in self.adjacency[tail]:
_lowerCamelCase : Dict = self.adjacency[head][tail]
string += F'''{head} -> {tail} == {weight}\n'''
return string.rstrip('\n' )
def A_ ( self ):
_lowerCamelCase : Tuple = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def A_ ( self ):
return self.adjacency.keys()
@staticmethod
def A_ ( lowercase=None , lowercase=None ):
_lowerCamelCase : int = Graph()
if vertices is None:
_lowerCamelCase : List[str] = []
if edges is None:
_lowerCamelCase : Tuple = []
for vertex in vertices:
g.add_vertex(lowercase )
for edge in edges:
g.add_edge(*lowercase )
return g
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : Dict = {}
_lowerCamelCase : List[str] = {}
def __len__( self ):
return len(self.parent )
def A_ ( self , lowercase ):
if item in self.parent:
return self.find(lowercase )
_lowerCamelCase : List[Any] = item
_lowerCamelCase : Optional[Any] = 0
return item
def A_ ( self , lowercase ):
if item not in self.parent:
return self.make_set(lowercase )
if item != self.parent[item]:
_lowerCamelCase : List[Any] = self.find(self.parent[item] )
return self.parent[item]
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Union[str, Any] = self.find(lowercase )
_lowerCamelCase : int = self.find(lowercase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
_lowerCamelCase : int = roota
return roota
if self.rank[roota] < self.rank[roota]:
_lowerCamelCase : Optional[int] = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
_lowerCamelCase : Optional[Any] = roota
return roota
return None
@staticmethod
def A_ ( lowercase ):
_lowerCamelCase : Optional[Any] = graph.num_vertices
_lowerCamelCase : Any = Graph.UnionFind()
_lowerCamelCase : List[Any] = []
while num_components > 1:
_lowerCamelCase : int = {}
for vertex in graph.get_vertices():
_lowerCamelCase : Union[str, Any] = -1
_lowerCamelCase : Optional[int] = graph.get_edges()
for edge in edges:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = edge
edges.remove((tail, head, weight) )
for edge in edges:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = edge
_lowerCamelCase : Optional[Any] = union_find.find(lowercase )
_lowerCamelCase : List[str] = union_find.find(lowercase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCamelCase : List[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
_lowerCamelCase : Dict = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase : List[Any] = cheap_edge[vertex]
if union_find.find(lowercase ) != union_find.find(lowercase ):
union_find.union(lowercase , lowercase )
mst_edges.append(cheap_edge[vertex] )
_lowerCamelCase : List[str] = num_components - 1
_lowerCamelCase : Any = Graph.build(edges=lowercase )
return mst
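

# Minimal usage sketch (not in the original file): a 4-vertex example whose
# MST is {0-1, 0-2, 2-3}. `distinct_weight` is assumed to be called first
# because the cheapest-edge selection above relies on distinct weights.
if __name__ == "__main__":
    g = Graph.build(edges=[(0, 1, 1), (0, 2, 2), (2, 3, 3), (1, 3, 10)])
    g.distinct_weight()
    print(Graph.boruvka(g))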
| 492
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    """Configuration for the REALM embedder/encoder/scorer/reader/open-QA models."""

    model_type = "realm"

    def __init__(
        self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
        num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
        max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 492
| 1
|
"""simple docstring"""
from __future__ import annotations
class Node:
    """A binary tree node holding an integer payload."""

    def __init__(self, data: int) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    """A tree is full iff every node has either zero or two children."""
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
if __name__ == "__main__":
main()
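
# Quick illustration (not in the original file): a perfect 3-node tree is full,
# while a 2-node chain is not.
# root = Node(1); root.left = Node(2); root.right = Node(3)
# is_full_binary_tree(root)   -> True
# chain = Node(1); chain.left = Node(2)
# is_full_binary_tree(chain)  -> False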
| 82
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    """Sums every node value in a binary tree via a recursive depth-first search."""

    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
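
# Minimal usage sketch (not in the original file): summing a 3-node tree.
# Iterating yields a single total, so the builtin sum() unwraps it:
# tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
# sum(BinaryTreeNodeSum(tree))  -> 12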
| 373
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluates a polynomial given as coefficients (lowest degree first) at x."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluates the same polynomial with Horner's method (one multiply-add per coefficient)."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
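    # Worked check (not in the original file): for poly = (0, 0, 5, 9.3, 7) at x = 10,
    # the value is 5*10**2 + 9.3*10**3 + 7*10**4 = 500 + 9300 + 70000 = 79800, so both
    # functions should print 79800.0 (up to floating-point rounding).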
| 280
|
'''simple docstring'''
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Integrates y' = f(x, y) from x0 to x_end with step h using the classic RK4 scheme."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
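
# Minimal usage sketch (not in the original file): y' = y with y(0) = 1 integrated
# to x = 1 should approach e ≈ 2.71828 as the step size shrinks.
# y = runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
# y[-1]  -> approximately 2.7182818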
| 280
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes,
            depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 24
|
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Runs two Transformer2DModels over the same hidden states, each cross-attending to a
    different slice of `encoder_hidden_states`, and mixes their residual outputs."""

    def __init__(
        self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None,
        num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None, attention_bias: bool = False,
        sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim,
                    in_channels=in_channels, num_layers=num_layers, dropout=dropout,
                    norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias, sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds, activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None,
        cross_attention_kwargs=None, return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
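

# Numeric sketch of the mixing step above (not in the original file): with
# mix_ratio = 0.5 the two residuals are averaged before the input is added back.
# import torch
# a, b, x = torch.ones(2), torch.zeros(2), torch.full((2,), 3.0)
# 0.5 * a + (1 - 0.5) * b + x  -> tensor([3.5000, 3.5000])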
| 467
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4


class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=False, remove_space=True,
        keep_accents=False, bos_token="<s>", eos_token="</s>", unk_token="<unk>", sep_token="<sep>",
        pad_token="<pad>", cls_token="<cls>", mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"], **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case,
            remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token,
            unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token,
            mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """An XLNet sequence has the format ``X <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first segment, 1 for the second, 2 for the trailing <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
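

# Illustrative usage (assumes Hub access; not part of the original module):
# from transformers import XLNetTokenizerFast
# tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
# tok("Hello world")["input_ids"]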
| 644
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]

        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]

        assert encoded[-1] == encoded_dot[0]
| 644
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 150
|
'''simple docstring'''
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 150
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1_024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
| 720
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 647
| 0
|
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 548
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Prints a path of 1s through `maze` (0 = open cell, 1 = blocked) if one exists."""
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Backtracking step: tries to extend the path at cell (i, j)."""
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False

    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
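    # Usage sketch (not part of the original file): 0 = open cell, 1 = wall.
    # solve_maze([[0, 1, 0], [0, 0, 0], [1, 0, 0]]) prints the visited-path
    # matrix and returns True, since (0, 0) can reach (2, 2).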
| 548
| 1
|
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
A__ : Union[str, Any] = """src/transformers"""
A__ : str = """docs/source/en"""
A__ : int = """."""
def _find_text_in_file(filename, start_prompt, end_prompt):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
ALLOWED_MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
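# e.g. camel_case_split("TFRobertaModel") -> ["TF", "Roberta", "Model"]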
def _center_text(text, width):
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}
    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)
    # Let's loop through all transformers objects (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])
    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2
    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-align table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"
    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()
    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 721
|
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
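if __name__ == "__main__":
    # Usage sketch (not part of the original file): order statistics.
    items = [2, 4, 5, 7, 899, 54, 32]
    print(quick_select(items, 5))  # 54, the 6th-smallest element
    print(quick_select(items, len(items) // 2))  # the median, 7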
| 124
| 0
|
def one_pence() -> int:
    return 1
def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()
def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)
def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)
def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)
def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)
def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)
def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)
def solution(x: int = 200) -> int:
    return two_pound(x)
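# Hand-checkable example: one_pound(5) counts the ways to make 5p from coins
# up to £1 — {5}, {2+2+1}, {2+1+1+1}, {1+1+1+1+1} — i.e. 4 ways. solution()
# gives the Project Euler 31 count for 200p.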
if __name__ == "__main__":
print(solution(int(input().strip())))
| 257
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" MBART tokenizer backed by HuggingFace's *tokenizers* library."""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts, src_lang="en_XX", tgt_texts=None, tgt_lang="ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
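# Usage sketch (not part of the original file; assumes Hugging Face Hub access):
#   tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief says there is no military solution in Syria", return_tensors="pt")
#   batch["input_ids"] ends with [eos_token_id, en_XX code], per set_src_lang_special_tokens.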
| 257
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_xlm_roberta''': [
'''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaConfig''',
'''XLMRobertaOnnxConfig''',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
'''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaForCausalLM''',
'''XLMRobertaForMaskedLM''',
'''XLMRobertaForMultipleChoice''',
'''XLMRobertaForQuestionAnswering''',
'''XLMRobertaForSequenceClassification''',
'''XLMRobertaForTokenClassification''',
'''XLMRobertaModel''',
'''XLMRobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
'''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMRobertaForCausalLM''',
'''TFXLMRobertaForMaskedLM''',
'''TFXLMRobertaForMultipleChoice''',
'''TFXLMRobertaForQuestionAnswering''',
'''TFXLMRobertaForSequenceClassification''',
'''TFXLMRobertaForTokenClassification''',
'''TFXLMRobertaModel''',
'''TFXLMRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
'''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxXLMRobertaForMaskedLM''',
'''FlaxXLMRobertaForCausalLM''',
'''FlaxXLMRobertaForMultipleChoice''',
'''FlaxXLMRobertaForQuestionAnswering''',
'''FlaxXLMRobertaForSequenceClassification''',
'''FlaxXLMRobertaForTokenClassification''',
'''FlaxXLMRobertaModel''',
'''FlaxXLMRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 704
|
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single document-understanding processor."""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call.")
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Converts a generated token sequence into an ordered JSON format."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
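    # Example of the flat tag format token2json parses (a sketch, not from the
    # original file):
    #   "<s_menu><s_name>Latte</s_name></s_menu>" -> {"menu": {"name": "Latte"}}
    #   Leaf values separated by "<sep/>" come back as a Python list.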
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 157
| 0
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")
    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev(index) -> int:
        return index - (index & (-index))
    def add(self, index, value) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)
    def update(self, index, value) -> None:
        self.add(index, value - self.get(index))
    def prefix(self, right) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result
    def query(self, left, right) -> int:
        return self.prefix(right) - self.prefix(left)
    def get(self, index) -> int:
        return self.query(index, index + 1)
    def rank_query(self, value) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
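    # Usage sketch (not part of the original file):
    f = FenwickTree([1, 2, 3])
    assert f.prefix(3) == 6      # 1 + 2 + 3
    f.add(1, 10)                 # the represented array becomes [1, 12, 3]
    assert f.query(1, 3) == 15   # 12 + 3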
| 20
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)
    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)
    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")
    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits
    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 20
| 1
|
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}
    CONVERSION_MAPPING = {
"""token_embedder""": """embeddings""",
"""encoder_norm""": """layernorm""",
"""kernel""": """weight""",
""".out""": """.output""",
"""scale""": """weight""",
"""embedders_0.pos_embedding""": """row_embedder.weight""",
"""embedders_1.pos_embedding""": """column_embedder.weight""",
}
    DECODER_CONVERSION_MAPPING = {
"""query""": """attention.query""",
"""key""": """attention.key""",
"""value""": """attention.value""",
"""output.dense""": """output""",
"""encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""",
"""pre_self_attention_layer_norm""": """self_attention.layer_norm""",
"""pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""",
"""mlp.""": """mlp.DenseReluDense.""",
"""pre_mlp_layer_norm""": """mlp.layer_norm""",
"""self_attention.o""": """self_attention.attention.o""",
"""decoder.embeddings.embedding""": """decoder.embed_tokens.weight""",
"""decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""",
"""decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""",
"""decoder.logits_dense.weight""": """decoder.lm_head.weight""",
}
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18)
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa)
    model = Pix2StructForConditionalGeneration(config)
    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 707
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image of a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image of a different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 199
| 0
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 454
|
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
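# Sanity check from the Project Euler 86 statement: M = 99 gives 1_975 cuboids
# and M = 100 gives 2_060, so solution(2_000) == 100.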
if __name__ == "__main__":
print(f'''{solution() = }''')
| 81
| 0
|
"""simple docstring"""
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None
    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list):
    """Creates a Linked List from the elements of the given sequence and returns its head."""
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node: Node) -> None:
    """Prints the data of the linked list in reverse, via recursion."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
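# print_reverse on the list 1->2->3 prints 3, then 2, then 1: the recursion
# walks to the tail before the first print executes.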
def main():
    from doctest import testmod
    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
| 705
|
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # the file's features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def lowerCAmelCase (__UpperCamelCase : Dict ):
"""simple docstring"""
return json.load(__UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
    return [json.loads(line ) for line in __UpperCamelCase]
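# For reference: load_json above parses one whole JSON document from the buffer, while
# load_json_lines parses one JSON object per line (JSON Lines), e.g.:
#
#     buf = io.BytesIO(b'{"a": 1}\n{"a": 2}\n')
#     [json.loads(line) for line in buf]  # -> [{"a": 1}, {"a": 2}]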
class _lowercase :
"""simple docstring"""
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ ).write()
buffer.seek(0 )
__UpperCamelCase =load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert isinstance(exported_content[0] , UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def UpperCAmelCase_ ( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ) -> str:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , orient=UpperCamelCase__ ).write()
buffer.seek(0 )
__UpperCamelCase =load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase_ ( self : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] ) -> Any:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
__UpperCamelCase =load_json_function(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert isinstance(exported_content[0] , UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 10
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789''' ), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def UpperCAmelCase_ ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ) -> Any:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , orient=UpperCamelCase__ , num_proc=2 ).write()
buffer.seek(0 )
__UpperCamelCase =load_json(UpperCamelCase__ )
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__ , '''keys''' ) and not hasattr(exported_content[0] , '''keys''' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(UpperCamelCase__ ) == 10
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : List[Any] ) -> Dict:
'''simple docstring'''
with pytest.raises(UpperCamelCase__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , num_proc=0 )
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')] )
def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =tmp_path_factory.mktemp('''data''' ) / f"""test.json.{extension}"""
__UpperCamelCase =str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , compression=UpperCamelCase__ ).write()
with fsspec.open(UpperCamelCase__ , '''rb''' , compression='''infer''' ) as f:
__UpperCamelCase =f.read()
with fsspec.open(UpperCamelCase__ , '''rb''' , compression='''infer''' ) as f:
__UpperCamelCase =f.read()
assert exported_content == original_content
| 296
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class UpperCamelCase_ (__A , __A ):
__magic_name__ = '''resnet'''
__magic_name__ = ['''basic''', '''bottleneck''']
def __init__( self : Optional[Any] , lowerCAmelCase_ : str=3 , lowerCAmelCase_ : int=64 , lowerCAmelCase_ : Union[str, Any]=[256, 512, 1_024, 2_048] , lowerCAmelCase_ : str=[3, 4, 6, 3] , lowerCAmelCase_ : Dict="bottleneck" , lowerCAmelCase_ : Any="relu" , lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Optional[Any]=None , lowerCAmelCase_ : Optional[int]=None , **lowerCAmelCase_ : Union[str, Any] , ) -> Any:
super().__init__(**lowerCAmelCase_ )
if layer_type not in self.layer_types:
raise ValueError(f"""layer_type={layer_type} is not one of {','.join(self.layer_types )}""" )
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Dict = embedding_size
UpperCAmelCase_ : Union[str, Any] = hidden_sizes
UpperCAmelCase_ : List[str] = depths
UpperCAmelCase_ : int = layer_type
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Any = downsample_in_first_stage
UpperCAmelCase_ : List[str] = ["stem"] + [f"""stage{idx}""" for idx in range(1 , len(lowerCAmelCase_ ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = get_aligned_output_features_output_indices(
out_features=lowerCAmelCase_ , out_indices=lowerCAmelCase_ , stage_names=self.stage_names )
class UpperCamelCase_ (__A ):
__magic_name__ = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> float:
return 1e-3
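# Note: in the upstream transformers OnnxConfig API the two properties above are
# `inputs` (the ONNX input axes specification) and `atol_for_validation` (the
# absolute tolerance used when validating the exported model against the reference).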
| 95
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def __lowerCamelCase ( sentence ) -> str:
    """simple docstring"""
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
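# Example: __lowerCamelCase("hello world") -> "Hello world"; an empty string
# returns "" and a non-letter first character is returned unchanged.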
if __name__ == "__main__":
from doctest import testmod
testmod()
| 430
| 0
|
"""simple docstring"""
import os
def UpperCAmelCase ( ):
    """simple docstring"""
    script_directory = os.path.dirname(os.path.realpath(__file__ ) )
    triangle_path = os.path.join(script_directory , 'triangle.txt' )
    with open(triangle_path ) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number ) )
        a.append(numbers_from_line )
    for i in range(1 , len(a ) ):
        for j in range(len(a[i] ) ):
            number_a = a[i - 1][j] if j != len(a[i - 1] ) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a , number_b )
    return max(a[-1] )
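# Worked example of the accumulation above: the triangle [[1], [2, 3], [4, 5, 6]]
# becomes [[1], [3, 4], [7, 9, 10]], so the maximum path sum is 10 (1 + 3 + 6).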
if __name__ == "__main__":
print(solution())
| 536
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = ["GLPNFeatureExtractor"]
__lowerCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
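# With _LazyModule, `from transformers.models.glpn import GLPNModel` defers the heavy
# torch import until first attribute access; a minimal sketch of the mechanism:
#
#     module = _LazyModule(__name__, globals()["__file__"], _import_structure)
#     module.GLPNModel  # __getattr__ imports .modeling_glpn on first access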
| 536
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class SCREAMING_SNAKE_CASE_ ( __a , unittest.TestCase ):
"""simple docstring"""
__lowercase : Union[str, Any] = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def snake_case_ ( self , lowerCAmelCase__=0):
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 1_2_8, 1_2_8) , rng=random.Random(lowerCAmelCase__))
__SCREAMING_SNAKE_CASE = torch.manual_seed(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
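    # The 128x128 random image above exercises the x4 upscaler: each test below
    # asserts the output grows to 512x512 and compares a 3x3 corner slice against
    # reference values.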
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""")
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice - expected_slice).max() < 1E-1
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""")
__SCREAMING_SNAKE_CASE = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""")
__SCREAMING_SNAKE_CASE = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""")
__SCREAMING_SNAKE_CASE = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""")
__SCREAMING_SNAKE_CASE = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_dummy_inputs()
__SCREAMING_SNAKE_CASE = pipe(**lowerCAmelCase__).images
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case_ ( self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = ort.SessionOptions()
__SCREAMING_SNAKE_CASE = False
return options
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""")
__SCREAMING_SNAKE_CASE = init_image.resize((1_2_8, 1_2_8))
# using the PNDM scheduler by default
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=1_0 , generator=lowerCAmelCase__ , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""")
__SCREAMING_SNAKE_CASE = init_image.resize((1_2_8, 1_2_8))
__SCREAMING_SNAKE_CASE = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""")
__SCREAMING_SNAKE_CASE = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=lowerCAmelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = """A fantasy landscape, trending on artstation"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0)
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2_0 , generator=lowerCAmelCase__ , output_type="""np""" , )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
__SCREAMING_SNAKE_CASE = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66])
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice).max() < 2E-2
| 155
|
"""simple docstring"""
def solution ( n = 100 ):
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
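# Example: solution(10) returns 2640, since (1 + ... + 10)**2 = 55**2 = 3025 and
# 1**2 + ... + 10**2 = 385, and 3025 - 385 = 2640.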
if __name__ == "__main__":
print(F"""{solution() = }""")
| 155
| 1
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class _snake_case ( unittest.TestCase ):
@slow
def snake_case__ ( self):
        model = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""")
        tokenizer = AutoTokenizer.from_pretrained("""xlm-roberta-base""")
        text = """The dog is cute and lives in the garden house"""
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]])
        output = model(input_ids)["""last_hidden_state"""]
        self.assertEqual(output.shape , expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , expected_output_values_last_dim , atol=1e-3))
| 113
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class _snake_case ( a__ , a__ , unittest.TestCase ):
lowerCAmelCase :Optional[int] = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase :Union[str, Any] = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase :Optional[Any] = False
lowerCAmelCase :Dict = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False):
UpperCAmelCase__ : int = super()._prepare_for_class(_lowerCamelCase , _lowerCamelCase , return_labels=_lowerCamelCase)
if return_labels:
if model_class in get_values(_lowerCamelCase):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa)
return inputs_dict
class _snake_case ( a__ ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
UpperCAmelCase__ : Optional[Any] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Any = seq_length
UpperCAmelCase__ : Dict = is_training
UpperCAmelCase__ : str = use_input_mask
UpperCAmelCase__ : int = use_token_type_ids
UpperCAmelCase__ : Optional[Any] = use_labels
UpperCAmelCase__ : Any = vocab_size
UpperCAmelCase__ : Optional[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : Dict = intermediate_size
UpperCAmelCase__ : str = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = max_position_embeddings
UpperCAmelCase__ : Union[str, Any] = type_vocab_size
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Optional[int] = initializer_range
UpperCAmelCase__ : Union[str, Any] = num_labels
UpperCAmelCase__ : List[str] = num_choices
UpperCAmelCase__ : str = scope
UpperCAmelCase__ : Optional[int] = embedding_size
def snake_case__ ( self):
UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
UpperCAmelCase__ : Optional[Any] = None
if self.use_input_mask:
UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length])
UpperCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[Any] = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
UpperCAmelCase__ : str = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : str = TFMobileBertModel(config=_lowerCamelCase)
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : int = model(_lowerCamelCase)
UpperCAmelCase__ : Dict = [input_ids, input_mask]
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[str] = TFMobileBertForMaskedLM(config=_lowerCamelCase)
UpperCAmelCase__ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Dict = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=_lowerCamelCase)
UpperCAmelCase__ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[str] = TFMobileBertForPreTraining(config=_lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Any = model(_lowerCamelCase)
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Any = self.num_labels
UpperCAmelCase__ : Optional[Any] = TFMobileBertForSequenceClassification(config=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : Tuple = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Tuple = self.num_choices
UpperCAmelCase__ : Dict = TFMobileBertForMultipleChoice(config=_lowerCamelCase)
UpperCAmelCase__ : int = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : str = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
UpperCAmelCase__ : Optional[Any] = tf.tile(tf.expand_dims(_lowerCamelCase , 1) , (1, self.num_choices, 1))
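        # each (batch, seq_len) tensor above is expanded to (batch, num_choices, seq_len)
        # so every choice shares the same token, mask and type-id inputs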
UpperCAmelCase__ : Optional[int] = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Optional[Any] = TFMobileBertForTokenClassification(config=_lowerCamelCase)
UpperCAmelCase__ : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : List[Any] = model(_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Any = TFMobileBertForQuestionAnswering(config=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def snake_case__ ( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
def snake_case__ ( self):
UpperCAmelCase__ : str = TFMobileBertModelTest.TFMobileBertModelTester(self)
UpperCAmelCase__ : List[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_lowerCamelCase)
def snake_case__ ( self):
UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_lowerCamelCase)
@slow
def snake_case__ ( self):
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
UpperCAmelCase__ : Optional[Any] = TFMobileBertModel.from_pretrained(_lowerCamelCase)
self.assertIsNotNone(_lowerCamelCase)
@require_tf
class _snake_case ( unittest.TestCase ):
@slow
def snake_case__ ( self):
UpperCAmelCase__ : Any = TFMobileBertForPreTraining.from_pretrained("""google/mobilebert-uncased""")
UpperCAmelCase__ : str = tf.constant([[0, 1, 2, 3, 4, 5]])
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase)[0]
UpperCAmelCase__ : List[str] = [1, 6, 3_0522]
self.assertEqual(output.shape , _lowerCamelCase)
UpperCAmelCase__ : List[Any] = tf.constant(
[
[
[-4.5919547, -9.248295, -9.645256],
[-6.7306175, -6.440284, -6.6052837],
[-7.2743506, -6.7847915, -6.024673],
]
])
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1e-4)
| 113
| 1
|
import argparse
import datetime
def zeller (date_input ):
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if len(date_input ) != 10:
        raise ValueError("""Must be 10 characters long""" )
    # Get month
    m = int(date_input[0] + date_input[1] )
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""" )
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get day
    d = int(date_input[3] + date_input[4] )
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""" )
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""" )
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
    # Arbitrary year range
    if not 45 < y < 8_500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""" )
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y ) , int(m ) , int(d ) )
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y )[:2] )
    k = int(str(y )[2:] )
    t = int(2.6 * m - 5.3_9 )
    u = int(c / 4 )
    v = int(k / 4 )
    x = int(d + k )
    z = int(t + u + v + x )
    w = int(z - (2 * c) )
    f = round(w % 7 )
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
    # Response
    response = f'''Your date {date_input}, is a {days[str(f )]}!'''
    return response
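# Worked example: for "01-01-2000" the month shift gives m = 13, y = 1999, so c = 19,
# k = 99, t = 28, u = 4, v = 24, x = 100, z = 156, w = 118 and f = 118 % 7 = 6,
# i.e. "Saturday" (2000-01-01 was indeed a Saturday).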
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
zeller(args.date_input)
| 99
|
'''simple docstring'''
def solution ( power = 10_00 )-> int:
    n = 2**power
    r = 0
    while n:
        r , n = r + n % 10, n // 10
    return r
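# Example: solution(15) returns 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.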
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 126
| 0
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory :
    """simple docstring"""
    def __init__( self )-> List[Any]:
        '''simple docstring'''
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor( self )-> List[Any]:
        '''simple docstring'''
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss , self.cpu_memory_peak )
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start( self )-> Optional[Any]:
        '''simple docstring'''
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor )
        self.thread.daemon = True
        self.thread.start()
    def stop( self )-> Any:
        '''simple docstring'''
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure() -> Union[str, Any]:
    '''simple docstring'''
    measures = {'''time''': time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['''cpu'''] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = torch.cuda.memory_allocated(i )
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures ) -> Optional[int]:
    '''simple docstring'''
    measures = {'''time''': time.time() - start_measures['''time''']}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures['''cpu'''] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20
    measures['''cpu-peak'''] = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count() ):
        measures[str(i )] = (torch.cuda.memory_allocated(i ) - start_measures[str(i )]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i ) - start_measures[str(i )]) / 2**20
    return measures
def log_measures(measures , description ) -> List[Any]:
    '''simple docstring'''
    print(f"{description}:" )
    print(f"- Time: {measures['time']:.2f}s" )
    for i in range(torch.cuda.device_count() ):
        print(f"- GPU {i} allocated: {measures[str(i )]:.2f}MiB" )
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB" )
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
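# Minimal usage sketch for the helpers above (`model` and `batch` are assumed to
# exist in the caller's scope):
#
#     start = start_measure()
#     outputs = model(batch)              # workload to profile
#     stats = end_measure(start)
#     log_measures(stats, "forward pass")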
| 451
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaProcessor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
lowercase__ : int = True
from torch.cuda.amp import autocast
lowercase__ : Dict = logging.getLogger(__name__)
def A_ ( snake_case : List[str]=None , snake_case : int=None ) -> List[str]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=snake_case )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
_snake_case = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
_snake_case = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
_snake_case = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} )
_snake_case = field(
default=0.1 , metadata={'help': 'The dropout ratio for the attention probabilities.'} )
_snake_case = field(
default=0.1 , metadata={'help': 'The dropout ratio for activations inside the fully connected layer.'} )
_snake_case = field(
default=0.1 , metadata={
            'help': 'The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.'
} , )
_snake_case = field(
        default=0.1 , metadata={'help': 'The dropout probability for all 1D convolutional layers in feature extractor.'} , )
_snake_case = field(
default=0.05 , metadata={
'help': (
            'Probability of each feature vector along the time axis to be chosen as the start of the vector '
            'span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature '
            'vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``.'
)
} , )
_snake_case = field(default=0.0 , metadata={'help': 'The LayerDrop probability.'} )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
_snake_case = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_snake_case = field(
default='train+validation' , metadata={
        'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train+validation\''
} , )
_snake_case = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
_snake_case = field(
default=SCREAMING_SNAKE_CASE_ , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
_snake_case = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_snake_case = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of validation examples to this '
'value if set.'
)
} , )
_snake_case = list_field(
default=[',', '?', '.', '!', '-', ';', ':', '""', '%', '\'', '"', '�'] , metadata={'help': 'A list of characters to remove from the transcripts.'} , )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
_snake_case = 42
_snake_case = True
_snake_case = None
_snake_case = None
_snake_case = None
_snake_case = None
def __call__( self , SCREAMING_SNAKE_CASE_ )-> Dict[str, torch.Tensor]:
'''simple docstring'''
__UpperCamelCase = [{'''input_values''': feature['''input_values''']} for feature in features]
__UpperCamelCase = [{'''input_ids''': feature['''labels''']} for feature in features]
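        # audio features and label ids are padded separately below, since they differ
        # in length and in pad value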
__UpperCamelCase = self.processor.pad(
SCREAMING_SNAKE_CASE_ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
__UpperCamelCase = self.processor.pad(
labels=SCREAMING_SNAKE_CASE_ , padding=self.padding , max_length=self.max_length_labels , pad_to_multiple_of=self.pad_to_multiple_of_labels , return_tensors='''pt''' , )
# replace padding with -100 to ignore loss correctly
__UpperCamelCase = labels_batch['''input_ids'''].masked_fill(labels_batch.attention_mask.ne(1 ) , -100 )
__UpperCamelCase = labels
return batch
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def A__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )-> torch.Tensor:
'''simple docstring'''
model.train()
__UpperCamelCase = self._prepare_inputs(SCREAMING_SNAKE_CASE_ )
if self.use_amp:
with autocast():
__UpperCamelCase = self.compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
__UpperCamelCase = self.compute_loss(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if self.args.n_gpu > 1:
if model.module.config.ctc_loss_reduction == "mean":
__UpperCamelCase = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
__UpperCamelCase = loss.sum() / (inputs['''labels'''] >= 0).sum()
else:
raise ValueError(F"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']" )
if self.args.gradient_accumulation_steps > 1:
__UpperCamelCase = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(SCREAMING_SNAKE_CASE_ ).backward()
elif self.use_apex:
with amp.scale_loss(SCREAMING_SNAKE_CASE_ , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(SCREAMING_SNAKE_CASE_ )
else:
loss.backward()
return loss.detach()
def A_ ( ) -> Union[str, Any]:
'''simple docstring'''
__UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__UpperCamelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__UpperCamelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , snake_case )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
__UpperCamelCase = datasets.load_dataset(
'''common_voice''' , data_args.dataset_config_name , split=data_args.train_split_name )
__UpperCamelCase = datasets.load_dataset('''common_voice''' , data_args.dataset_config_name , split='''test''' )
# Create and save tokenizer
__UpperCamelCase = f"[{''.join(data_args.chars_to_ignore )}]"
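    # e.g. chars_to_ignore [",", "?", "."] yields the regex character class "[,?.]"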
def remove_special_characters(snake_case : Tuple ):
__UpperCamelCase = re.sub(snake_case , '''''' , batch['''sentence'''] ).lower() + ''' '''
return batch
__UpperCamelCase = train_dataset.map(snake_case , remove_columns=['''sentence'''] )
__UpperCamelCase = eval_dataset.map(snake_case , remove_columns=['''sentence'''] )
def extract_all_chars(snake_case : Dict ):
__UpperCamelCase = ''' '''.join(batch['''text'''] )
__UpperCamelCase = list(set(snake_case ) )
return {"vocab": [vocab], "all_text": [all_text]}
__UpperCamelCase = train_dataset.map(
snake_case , batched=snake_case , batch_size=-1 , keep_in_memory=snake_case , remove_columns=train_dataset.column_names , )
__UpperCamelCase = train_dataset.map(
snake_case , batched=snake_case , batch_size=-1 , keep_in_memory=snake_case , remove_columns=eval_dataset.column_names , )
__UpperCamelCase = list(set(vocab_train['''vocab'''][0] ) | set(vocab_test['''vocab'''][0] ) )
__UpperCamelCase = {v: k for k, v in enumerate(snake_case )}
    # map the word delimiter "|" to the old space id and append the special tokens
    vocab_dict['''|'''] = vocab_dict[''' ''']
    del vocab_dict[" "]
    vocab_dict['''[UNK]'''] = len(vocab_dict )
    vocab_dict['''[PAD]'''] = len(vocab_dict )
with open('''vocab.json''' , '''w''' ) as vocab_file:
json.dump(snake_case , snake_case )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__UpperCamelCase = WavaVecaCTCTokenizer(
'''vocab.json''' , unk_token='''[UNK]''' , pad_token='''[PAD]''' , word_delimiter_token='''|''' , )
__UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0.0 , do_normalize=snake_case , return_attention_mask=snake_case )
__UpperCamelCase = WavaVecaProcessor(feature_extractor=snake_case , tokenizer=snake_case )
__UpperCamelCase = WavaVecaForCTC.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , activation_dropout=model_args.activation_dropout , attention_dropout=model_args.attention_dropout , hidden_dropout=model_args.hidden_dropout , feat_proj_dropout=model_args.feat_proj_dropout , mask_time_prob=model_args.mask_time_prob , gradient_checkpointing=training_args.gradient_checkpointing , layerdrop=model_args.layerdrop , ctc_loss_reduction='''mean''' , pad_token_id=processor.tokenizer.pad_token_id , vocab_size=len(processor.tokenizer ) , )
if data_args.max_train_samples is not None:
__UpperCamelCase = min(len(snake_case ) , data_args.max_train_samples )
__UpperCamelCase = train_dataset.select(range(snake_case ) )
if data_args.max_val_samples is not None:
__UpperCamelCase = eval_dataset.select(range(data_args.max_val_samples ) )
__UpperCamelCase = torchaudio.transforms.Resample(48000 , 16000 )
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
def speech_file_to_array_fn(snake_case : str ):
__UpperCamelCase , __UpperCamelCase = torchaudio.load(batch['''path'''] )
__UpperCamelCase = resampler(snake_case ).squeeze().numpy()
__UpperCamelCase = 16000
__UpperCamelCase = batch['''text''']
return batch
__UpperCamelCase = train_dataset.map(
snake_case , remove_columns=train_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
__UpperCamelCase = eval_dataset.map(
snake_case , remove_columns=eval_dataset.column_names , num_proc=data_args.preprocessing_num_workers , )
def prepare_dataset(snake_case : Optional[int] ):
# check that all files have the correct sampling rate
assert (
len(set(batch['''sampling_rate'''] ) ) == 1
), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
__UpperCamelCase = processor(
audio=batch['''speech'''] , text=batch['''target_text'''] , sampling_rate=batch['''sampling_rate'''][0] )
batch.update(snake_case )
return batch
__UpperCamelCase = train_dataset.map(
snake_case , remove_columns=train_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case , num_proc=data_args.preprocessing_num_workers , )
__UpperCamelCase = eval_dataset.map(
snake_case , remove_columns=eval_dataset.column_names , batch_size=training_args.per_device_train_batch_size , batched=snake_case , num_proc=data_args.preprocessing_num_workers , )
# Metric
__UpperCamelCase = datasets.load_metric('''wer''' )
def compute_metrics(snake_case : int ):
__UpperCamelCase = pred.predictions
__UpperCamelCase = np.argmax(snake_case , axis=-1 )
__UpperCamelCase = processor.tokenizer.pad_token_id
__UpperCamelCase = processor.batch_decode(snake_case )
# we do not want to group tokens when computing the metrics
__UpperCamelCase = processor.batch_decode(pred.label_ids , group_tokens=snake_case )
__UpperCamelCase = wer_metric.compute(predictions=snake_case , references=snake_case )
return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
__UpperCamelCase = DataCollatorCTCWithPadding(processor=snake_case , padding=snake_case )
# Initialize our Trainer
__UpperCamelCase = CTCTrainer(
model=snake_case , data_collator=snake_case , args=snake_case , compute_metrics=snake_case , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=processor.feature_extractor , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
__UpperCamelCase = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path ):
__UpperCamelCase = model_args.model_name_or_path
else:
__UpperCamelCase = None
# Save the feature_extractor and the tokenizer
if is_main_process(training_args.local_rank ):
processor.save_pretrained(training_args.output_dir )
__UpperCamelCase = trainer.train(resume_from_checkpoint=snake_case )
trainer.save_model()
__UpperCamelCase = train_result.metrics
__UpperCamelCase = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(snake_case )
)
__UpperCamelCase = min(snake_case , len(snake_case ) )
trainer.log_metrics('''train''' , snake_case )
trainer.save_metrics('''train''' , snake_case )
trainer.save_state()
# Evaluation
__UpperCamelCase = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__UpperCamelCase = trainer.evaluate()
__UpperCamelCase = data_args.max_val_samples if data_args.max_val_samples is not None else len(snake_case )
__UpperCamelCase = min(snake_case , len(snake_case ) )
trainer.log_metrics('''eval''' , snake_case )
trainer.save_metrics('''eval''' , snake_case )
return results
if __name__ == "__main__":
main()
| 451
| 1
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_lowerCamelCase = ['gpt2']
_lowerCamelCase = 'gpt2'
if is_tf_available():
class UpperCamelCase_ ( tf.Module ):
def __init__( self :Optional[Any] , __A :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = tokenizer
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__A )
SCREAMING_SNAKE_CASE__ = TFGPTaLMHeadModel.from_config(__A )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def _snake_case ( self :Optional[int] , __A :int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A )
SCREAMING_SNAKE_CASE__ = tokenized["""input_ids"""].to_tensor()
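        # to_tensor() pads the ragged batch with zeros, so the mask below treats id 0 as padding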
SCREAMING_SNAKE_CASE__ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
SCREAMING_SNAKE_CASE__ = self.model(input_ids=__A , attention_mask=__A )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :List[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )
        self.test_sentences = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        self.paired_sentences = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 12_3123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
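A self-contained sketch of the tf.function + SavedModel round trip the tests above exercise, using a trivial module instead of a tokenizer (requires only TensorFlow; the Doubler class is invented for illustration):

import tensorflow as tf
from tempfile import TemporaryDirectory

class Doubler(tf.Module):
    # fixed input signature, as in ModelToSave.serving above
    @tf.function(input_signature=(tf.TensorSpec((None,), tf.float32, name="x"),))
    def serving(self, x):
        return x * 2.0

model = Doubler()
inputs = tf.constant([1.0, 2.0])
out = model.serving(inputs)  # trace once before export
with TemporaryDirectory() as tmpdir:
    tf.saved_model.save(model, tmpdir, signatures={"serving_default": model.serving})
    loaded = tf.saved_model.load(tmpdir)
    loaded_out = loaded.signatures["serving_default"](inputs)["output_0"]
print(tf.reduce_all(out == loaded_out).numpy())  # True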
| 6
|
'''simple docstring'''
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(f"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 627
| 0
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple ={
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=50_257,
        max_position_embeddings=20_48,
        hidden_size=20_48,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=2_56,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX: returns the largest divisor of seq_length below window_size."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
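How expand_attention_types_params unrolls the compact attention_types spec — a dependency-free check of the default GPT-Neo pattern (the standalone expand function is illustrative):

def expand(attention_types):
    attentions = []
    for pattern, repeats in attention_types:
        for _ in range(repeats):
            attentions.extend(pattern)
    return attentions

layers = expand([[["global", "local"], 12]])  # the config default
print(len(layers), layers[:4])  # 24 ['global', 'local', 'global', 'local']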
| 705
|
from __future__ import annotations

import bisect


def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
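The stdlib already covers the bisect-based variants above; a quick demonstration of the left/right boundary difference (sample data is invented):

import bisect

data = [1, 3, 5, 7, 9, 9, 11]
print(bisect.bisect_left(data, 9))   # 4 -> index of the first 9
print(bisect.bisect_right(data, 9))  # 6 -> one past the last 9

idx = bisect.bisect_left(data, 7)
print(idx if idx < len(data) and data[idx] == 7 else None)  # 3, same idea as binary_search_std_lib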
| 72
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowerCamelCase_ ( DiffusionPipeline ):
"""simple docstring"""
    def __init__( self , segmentation_model : CLIPSegForImageSegmentation , segmentation_processor : CLIPSegProcessor , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNetaDConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , ) -> None:
super().__init__()
        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def _lowercase ( self : Optional[int] , _a : Optional[Union[str, int]] = "auto" ) -> List[str]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _lowercase ( self : Optional[Any] ) -> int:
self.enable_attention_slicing(_a )
def _lowercase ( self : str ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCamelCase : Dict = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self : Dict ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use the segmentation model to build a mask from the prompt text
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)

        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
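The mask post-processing step in isolation — sigmoid over segmentation logits, scaled to a grayscale PIL image and resized for inpainting (random logits stand in for the CLIPSeg output; numpy replaces the torch ops):

import numpy as np
from PIL import Image

logits = np.random.randn(352, 352).astype("float32")  # stand-in for outputs.logits
probs = 1.0 / (1.0 + np.exp(-logits))                 # torch.sigmoid equivalent
mask = Image.fromarray((probs * 255).astype("uint8"))  # grayscale mask image
mask = mask.resize((512, 512))                         # match the inpainting resolution
print(mask.size, mask.mode)                            # (512, 512) L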
| 459
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    """Cleans a table-of-content section by removing duplicate entries and sorting titles alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
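Given the clean_doc_toc above, the core behavior on a toy table of contents — duplicates collapse, titles sort, and the overview entry stays first (the entries here are invented for illustration):

docs = [
    {"local": "pipe_b", "title": "Pipeline B"},
    {"local": "overview", "title": "Overview"},
    {"local": "pipe_a", "title": "Pipeline A"},
    {"local": "pipe_a", "title": "Pipeline A"},  # duplicate key, same title
]
print(clean_doc_toc(docs))
# [{'local': 'overview', 'title': 'Overview'},
#  {'local': 'pipe_a', 'title': 'Pipeline A'},
#  {'local': 'pipe_b', 'title': 'Pipeline B'}]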
| 459
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case__ : Any = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
snake_case__ : Dict = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
snake_case__ : str = False
snake_case__ : List[Any] = False
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : Union[str, Any] = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
a_ , a_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : List[Any] = model_class(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Dict = [*signature.parameters.keys()]
a_ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
a_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE__ )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
pass
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str ):
a_ : int = model_class(SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
with torch.no_grad():
a_ : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
a_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ : str = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
a_ , a_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : int ) -> str:
a_ , a_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Tuple = _config_zero_init(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(config=SCREAMING_SNAKE_CASE__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
pass
@slow
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Optional[Any] = UperNetForSemanticSegmentation.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
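What the integration assertions above verify, shown on stand-in tensors — a shape check plus an element-wise comparison of a 3x3 corner within an absolute tolerance (the values here are dummies, not real model outputs):

import torch

logits = torch.zeros(1, 150, 512, 512)  # stand-in for outputs.logits: (batch, num_labels, H, W)
assert logits.shape == torch.Size((1, 150, 512, 512))  # shape check
expected_slice = torch.zeros(3, 3)
print(torch.allclose(logits[0, 0, :3, :3], expected_slice, atol=1e-4))  # True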
| 443
|
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """Evaluates the functional correctness of a completion by running the program in a subprocess."""
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname


class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns False so nothing can be read from this stream."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions so generated code cannot interfere with the test
    (e.g. fork bomb, killing other processes, removing filesystem files). WARNING: this is NOT
    a security sandbox; untrusted model-generated code should not be blindly executed outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.fchdir = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore

    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
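A usage sketch for the sandbox above (Unix-only, since time_limit relies on SIGALRM; the sample program and task id are invented):

if __name__ == "__main__":
    program = "assert sum(range(10)) == 45"
    print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))
    # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}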
| 443
| 1
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__( self : Union[str, Any] ,A : str ,A : str = "rb" ,A : Optional[str] = None ,A : Optional[dict] = None ,A : int = DEFAULT_BLOCK_SIZE ,**A : List[str] ,):
'''simple docstring'''
super().__init__(
fo=A ,mode=A ,target_protocol=A ,target_options=A ,block_size=A ,**A ,)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCAmelCase__ : str = self.file.__enter__
class __lowercase :
def __init__( self : Tuple ,A : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = file_
def __enter__( self : List[Any] ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self : List[Any] ,*A : Union[str, Any] ,**A : Tuple ):
'''simple docstring'''
self._file.__exit__(*A ,**A )
def __iter__( self : Tuple ):
'''simple docstring'''
return iter(self._file )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return next(self._file )
def __getattr__( self : Tuple ,A : Any ):
'''simple docstring'''
return getattr(self._file ,A )
def fixed_enter(*A : List[Any] ,**A : List[str] ):
return WrappedFile(_enter(*A ,**A ) )
UpperCAmelCase__ : str = fixed_enter
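Transparent decompression through fsspec itself, which the filesystem classes above build on — a self-contained round trip that writes and reads a temporary gzip file:

import gzip
import os
import tempfile

import fsspec

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "file.txt.gz")
    with gzip.open(path, "wb") as f:
        f.write(b"hello fsspec")
    with fsspec.open(path, mode="rb", compression="gzip") as f:
        print(f.read())  # b'hello fsspec'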
| 65
|
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
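The same degree-4 fit on synthetic data, so it runs without the CSV download (the quadratic "salary" curve is invented); note `transform`, not `fit_transform`, for new query points:

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures

rng = np.random.default_rng(0)
X = np.linspace(1, 10, 50).reshape(-1, 1)
y = 0.5 * X.ravel() ** 2 + rng.normal(0.0, 1.0, 50)

poly = PolynomialFeatures(degree=4)
X_poly = poly.fit_transform(X)
model = LinearRegression().fit(X_poly, y)
print(model.predict(poly.transform([[5.5]])))  # roughly 0.5 * 5.5**2 = 15.1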
| 690
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _A ( unittest.TestCase ):
'''simple docstring'''
_lowercase = ViTImageProcessor if is_vision_available() else None
@property
def __lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Dict )-> Optional[int]:
snake_case__ : Any = (3, 32, 128)
snake_case__ : List[str] = tempfile.mkdtemp()
# fmt: off
snake_case__ : List[str] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case__ : List[Any] = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + """\n""" )
snake_case__ : Dict = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
snake_case__ : str = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( self : List[Any] , **lowerCamelCase : List[Any] )-> List[str]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowerCAmelCase ( self : Optional[Any] , **lowerCamelCase : str )-> Tuple:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def __lowerCAmelCase ( self : Any )-> Union[str, Any]:
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Tuple )-> List[str]:
snake_case__ : Dict = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
snake_case__ : str = Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) )
return image_input
def __lowerCAmelCase ( self : int )-> str:
snake_case__ : Union[str, Any] = self.get_tokenizer()
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
snake_case__ : str = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
snake_case__ : int = self.get_tokenizer()
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : int = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
snake_case__ : str = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
snake_case__ : Dict = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def __lowerCAmelCase ( self : Union[str, Any] )-> Optional[Any]:
snake_case__ : int = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Any = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Tuple = self.prepare_image_inputs()
snake_case__ : Optional[int] = image_processor(lowerCamelCase , return_tensors="""np""" )
snake_case__ : Optional[int] = processor(images=lowerCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Any = """test"""
snake_case__ : int = processor(text=lowerCamelCase )
snake_case__ : Tuple = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCAmelCase ( self : Union[str, Any] )-> List[Any]:
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : List[str] = self.get_tokenizer()
snake_case__ : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Optional[int] = """test"""
snake_case__ : Union[str, Any] = self.prepare_image_inputs()
snake_case__ : List[Any] = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def __lowerCAmelCase ( self : Dict )-> List[Any]:
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Tuple = self.get_tokenizer()
snake_case__ : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : str = processor.char_decode(lowerCamelCase )
snake_case__ : List[Any] = tokenizer.batch_decode(lowerCamelCase )
snake_case__ : Optional[int] = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def __lowerCAmelCase ( self : str )-> List[str]:
snake_case__ : str = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Tuple = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : str = None
snake_case__ : List[Any] = self.prepare_image_inputs()
snake_case__ : List[Any] = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __lowerCAmelCase ( self : Dict )-> List[str]:
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Union[str, Any] = MgpstrProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
snake_case__ : List[str] = torch.randn(1 , 27 , 38 )
snake_case__ : int = torch.randn(1 , 27 , 50_257 )
snake_case__ : Any = torch.randn(1 , 27 , 30_522 )
snake_case__ : Union[str, Any] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 172
|
'''simple docstring'''
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
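The reader above backs `load_dataset("text", ...)`; a self-contained round trip on a temporary file:

import os
import tempfile

from datasets import load_dataset

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "sample.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write("first line\nsecond line\n")
    ds = load_dataset("text", data_files=path, split="train")
    print(ds[0])  # {'text': 'first line'}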
| 172
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
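# Example invocation (illustrative model identifiers; adjust to your checkpoints):
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint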
| 200
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_informer'] = [
        'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InformerForPrediction',
        'InformerModel',
        'InformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 200
| 1
|
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = '.'


if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, 'utils/documentation_tests.txt')
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = '\n'.join(non_existent_paths)
        raise ValueError(f"""`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}""")
    if all_paths != sorted(all_paths):
        raise ValueError('Files in `utils/documentation_tests.txt` are not in alphabetical order.')
| 708
|
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"""{backend} is not in the deps table!"""
| 684
| 0
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer


class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = "Wav2Vec2FeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f"Loading a tokenizer inside {cls.__name__} from a config that does not"
                " include a `tokenizer_class` attribute is deprecated and will be "
                "removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
                " attribute to either your `config.json` or `tokenizer_config.json` "
                "file to suppress this warning: ",
                FutureWarning,
            )

            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)

            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backwards compatibility, route everything to the current processor while
        # inside the deprecated `as_target_processor` context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def pad(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
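# Minimal usage sketch (checkpoint name shown for illustration; any Wav2Vec2 CTC
# checkpoint with a processor works the same way):
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=raw_audio, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids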
| 507
|
'''simple docstring'''
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
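# Example (illustrative): for [1, 2, 4, 5, 9, 10] the best non-adjacent picks are
# 2 + 5 + 10, so:
#   assert maximum_non_adjacent_sum([1, 2, 4, 5, 9, 10]) == 17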
if __name__ == "__main__":
import doctest
doctest.testmod()
| 507
| 1
|
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
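# Example (illustrative numbers): 1 mol of an ideal gas at 300 K in 1 m^3 exerts
# P = nRT / V = 1 * 8.314462 * 300 / 1 ≈ 2494.34 Pa:
#   pressure_of_gas_system(1.0, 300.0, 1.0)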
if __name__ == "__main__":
from doctest import testmod
testmod()
| 52
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)


def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings


def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    args = get_args()
main(args)
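# Example invocation (illustrative paths; adjust to your data and checkpoint):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa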
| 52
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
    'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}


class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
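# Minimal usage sketch: the defaults mirror the base architecture, and
# `num_classes` is remapped to `num_labels` through `attribute_map`.
#
#   config = ErnieMConfig(num_hidden_layers=6)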
| 128
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index: int, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        if index == self.position:
            forceWrite(f' {self.arrow_char} ')
            self.write_choice(index)
        else:
            forceWrite(f'    {self.choices[index]}')
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
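# Minimal usage sketch:
#
#   menu = BulletMenu("Pick a backend:", ["pytorch", "tensorflow", "jax"])
#   choice_index = menu.run(default_choice=0)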
| 128
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
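# Quick sanity sketch: the cosine schedule yields one beta per diffusion step.
#
#   betas = betas_for_alpha_bar(1000)
#   assert betas.shape == (1000,)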
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t

    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
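# Minimal denoising-loop sketch (`unet` is a hypothetical noise-prediction model):
#
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample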
| 708
|
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string to little-endian in groups of 8 chars."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to a little-endian hex string."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    """Preprocesses the message string: appends padding and the message length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string


def get_block_words(bit_string: bytes):
    """Splits the bit string into 512-char blocks and yields each as 16 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    """Performs bitwise NOT on the given 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    """Adds two 32-bit integers modulo 2**32."""
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a given 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 digest of the given message."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
__SCREAMING_SNAKE_CASE : List[Any] = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
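# Example (standard MD5 test vector): the empty message hashes to the well-known
# empty-string digest:
#   assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"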
if __name__ == "__main__":
import doctest
doctest.testmod()
| 447
| 0
|
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 168
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
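# Minimal usage sketch (token ids are illustrative and depend on the checkpoint):
#
#   tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#   tokenizer("Hello world")["input_ids"]  # e.g. [101, 7592, 2088, 102]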
| 313
| 0
|
'''simple docstring'''
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
a_ = datasets.load_iris()
a_ = np.array(data['data'])
a_ = np.array(data['target'])
a_ = data['target_names']
a_ , a_ , a_ , a_ = train_test_split(X, y)
def _a( UpperCamelCase__ : Any, UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return np.linalg.norm(np.array(UpperCamelCase__ ) - np.array(UpperCamelCase__ ) )
def _a( UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Any=5 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple =zip(UpperCamelCase__, UpperCamelCase__ )
# List of distances of all points from the point to be classified
SCREAMING_SNAKE_CASE__ : Tuple =[]
for data_point in data:
SCREAMING_SNAKE_CASE__ : str =euclidean_distance(data_point[0], UpperCamelCase__ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
SCREAMING_SNAKE_CASE__ : Optional[Any] =[i[1] for i in sorted(UpperCamelCase__ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
SCREAMING_SNAKE_CASE__ : Any =Counter(UpperCamelCase__ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
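# --- Illustrative sketch (added; not part of the original file) ---
# Standalone k-NN majority vote on toy 2-D points, mirroring the logic
# above (sort by distance, take the k nearest, Counter vote) without
# relying on the obfuscated names; `_knn_demo` is a made-up helper.
def _knn_demo(k: int = 2) -> str:
    train = [((0.0, 0.0), "a"), ((1.0, 0.0), "a"), ((5.0, 5.0), "b")]
    query = (0.5, 0.1)
    dists = sorted(
        (sum((p - q) ** 2 for p, q in zip(point, query)) ** 0.5, label)
        for point, label in train
    )
    return Counter(label for _, label in dists[:k]).most_common(1)[0][0]

assert _knn_demo() == "a"  # both nearest neighbours carry label "a"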
| 707
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a_ = list[list[float | int]]
def _a( UpperCamelCase__ : Matrix, UpperCamelCase__ : Matrix ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(size + 1 )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : float
for row in range(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Tuple =matrix[row][col]
SCREAMING_SNAKE_CASE__ : Optional[int] =vector[row][0]
SCREAMING_SNAKE_CASE__ : Any =0
SCREAMING_SNAKE_CASE__ : Union[str, Any] =0
while row < size and col < size:
# pivoting
SCREAMING_SNAKE_CASE__ : Any =max((abs(augmented[rowa][col] ), rowa) for rowa in range(UpperCamelCase__, UpperCamelCase__ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[pivot_row], augmented[row]
for rowa in range(row + 1, UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =augmented[rowa][col] / augmented[row][col]
SCREAMING_SNAKE_CASE__ : Tuple =0
for cola in range(col + 1, size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1, UpperCamelCase__ ):
for row in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] =augmented[row][col] / augmented[col][col]
for cola in range(UpperCamelCase__, size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row], 1_0 )] for row in range(UpperCamelCase__ )
]
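# --- Illustrative sketch (added; not part of the original file) ---
# The routine above is Gaussian elimination with partial pivoting plus
# back substitution. A worked 2x2 instance of the same idea, solving
# 2x + y = 5 and x + 3y = 10 (x = 1, y = 3); `_gauss_2x2_demo` is a
# made-up helper.
def _gauss_2x2_demo() -> tuple[float, float]:
    a = [[2.0, 1.0, 5.0], [1.0, 3.0, 10.0]]  # augmented matrix
    ratio = a[1][0] / a[0][0]  # eliminate x from the second row
    a[1] = [a[1][j] - ratio * a[0][j] for j in range(3)]
    y = a[1][2] / a[1][1]  # back substitution
    x = (a[0][2] - a[0][1] * y) / a[0][0]
    return x, y

assert _gauss_2x2_demo() == (1.0, 3.0)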
def _a( UpperCamelCase__ : list[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =len(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ : Matrix =[[0 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix =[[0] for _ in range(UpperCamelCase__ )]
SCREAMING_SNAKE_CASE__ : Matrix
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
for x_val, y_val in enumerate(UpperCamelCase__ ):
for col in range(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] =(x_val + 1) ** (size - col - 1)
SCREAMING_SNAKE_CASE__ : Dict =y_val
SCREAMING_SNAKE_CASE__ : Optional[int] =solve(UpperCamelCase__, UpperCamelCase__ )
def interpolated_func(UpperCamelCase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(UpperCamelCase__ ) )
return interpolated_func
def _a( UpperCamelCase__ : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**1_0
)
def _a( UpperCamelCase__ : Callable[[int], int] = question_function, UpperCamelCase__ : int = 1_0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : list[int] =[func(UpperCamelCase__ ) for x_val in range(1, order + 1 )]
SCREAMING_SNAKE_CASE__ : list[Callable[[int], int]] =[
interpolate(data_points[:max_coeff] ) for max_coeff in range(1, order + 1 )
]
SCREAMING_SNAKE_CASE__ : int =0
SCREAMING_SNAKE_CASE__ : Callable[[int], int]
SCREAMING_SNAKE_CASE__ : int
for poly in polynomials:
SCREAMING_SNAKE_CASE__ : Any =1
while func(UpperCamelCase__ ) == poly(UpperCamelCase__ ):
x_val += 1
ret += poly(UpperCamelCase__ )
return ret
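# --- Illustrative note (added; not part of the original file) ---
# Project Euler 101: fitting the first k terms gives a degree-(k - 1)
# interpolant; its "first incorrect term" is found by stepping x upward
# until the fit disagrees with the generating polynomial, and the answer
# sums those first wrong values over all k.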
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665
| 0
|
'''simple docstring'''
UpperCAmelCase : List[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = input("""Enter message: """ )
__SCREAMING_SNAKE_CASE = input("""Enter key [alphanumeric]: """ )
__SCREAMING_SNAKE_CASE = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
__SCREAMING_SNAKE_CASE = '''encrypt'''
__SCREAMING_SNAKE_CASE = encrypt_message(__UpperCamelCase , __UpperCamelCase )
elif mode.lower().startswith("""d""" ):
__SCREAMING_SNAKE_CASE = '''decrypt'''
__SCREAMING_SNAKE_CASE = decrypt_message(__UpperCamelCase , __UpperCamelCase )
print(F'\n{mode.title()}ed message:' )
print(__UpperCamelCase )
def a__ ( a__ , a__ ):
"""simple docstring"""
return translate_message(__UpperCamelCase , __UpperCamelCase , """encrypt""" )
def a__ ( a__ , a__ ):
"""simple docstring"""
return translate_message(__UpperCamelCase , __UpperCamelCase , """decrypt""" )
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = key.upper()
for symbol in message:
__SCREAMING_SNAKE_CASE = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__UpperCamelCase )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__UpperCamelCase ):
__SCREAMING_SNAKE_CASE = 0
else:
translated.append(__UpperCamelCase )
return "".join(__UpperCamelCase )
if __name__ == "__main__":
main()
| 627
|
"""simple docstring"""
from math import pi, sqrt, tan
def __UpperCAmelCase ( __UpperCamelCase ):
if side_length < 0:
raise ValueError('''surface_area_cube() only accepts non-negative values''' )
return 6 * side_length**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('''surface_area_cuboid() only accepts non-negative values''' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def __UpperCAmelCase ( __UpperCamelCase ):
if radius < 0:
raise ValueError('''surface_area_sphere() only accepts non-negative values''' )
return 4 * pi * radius**2
def __UpperCAmelCase ( __UpperCamelCase ):
if radius < 0:
raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' )
return 3 * pi * radius**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cone() only accepts non-negative values''' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'''surface_area_conical_frustum() only accepts non-negative values''' )
__lowercase : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if radius < 0 or height < 0:
raise ValueError('''surface_area_cylinder() only accepts non-negative values''' )
return 2 * pi * radius * (height + radius)
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('''surface_area_torus() only accepts non-negative values''' )
if torus_radius < tube_radius:
raise ValueError(
'''surface_area_torus() does not support spindle or self intersecting tori''' )
return 4 * pow(__UpperCamelCase , 2 ) * torus_radius * tube_radius
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if length < 0 or width < 0:
raise ValueError('''area_rectangle() only accepts non-negative values''' )
return length * width
def __UpperCAmelCase ( __UpperCamelCase ):
if side_length < 0:
raise ValueError('''area_square() only accepts non-negative values''' )
return side_length**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError('''area_triangle() only accepts non-negative values''' )
return (base * height) / 2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('''Given three sides do not form a triangle''' )
__lowercase : int = (sidea + sidea + sidea) / 2
__lowercase : List[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
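# --- Illustrative note (added; not part of the original file) ---
# Heron's formula on the 3-4-5 right triangle: s = (3 + 4 + 5) / 2 = 6 and
# area = sqrt(6 * 3 * 2 * 1) = 6.0, matching the right-triangle value
# 3 * 4 / 2.
assert sqrt(6 * (6 - 3) * (6 - 4) * (6 - 5)) == 6.0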
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if base < 0 or height < 0:
raise ValueError('''area_parallelogram() only accepts non-negative values''' )
return base * height
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('''area_trapezium() only accepts non-negative values''' )
return 1 / 2 * (basea + basea) * height
def __UpperCAmelCase ( __UpperCamelCase ):
if radius < 0:
raise ValueError('''area_circle() only accepts non-negative values''' )
return pi * radius**2
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if radius_x < 0 or radius_y < 0:
raise ValueError('''area_ellipse() only accepts non-negative values''' )
return pi * radius_x * radius_y
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('''area_rhombus() only accepts non-negative values''' )
return 1 / 2 * diagonal_a * diagonal_a
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3:
raise ValueError(
'''area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides''' )
elif length < 0:
raise ValueError(
'''area_reg_polygon() only accepts non-negative values as \
length of a side''' )
return (sides * length**2) / (4 * tan(pi / sides ))
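# --- Illustrative note (added; not part of the original file) ---
# Regular polygon area A = n * s**2 / (4 * tan(pi / n)); for a square
# (n = 4, s = 10) this gives 400 / (4 * tan(pi / 4)) = 100, as expected.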
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(F"Rectangle: {area_rectangle(1_0, 2_0) = }")
print(F"Square: {area_square(1_0) = }")
print(F"Triangle: {area_triangle(1_0, 1_0) = }")
print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
print(F"Rhombus: {area_rhombus(1_0, 2_0) = }")
print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
print(F"Circle: {area_circle(2_0) = }")
print(F"Ellipse: {area_ellipse(1_0, 2_0) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(F"Cube: {surface_area_cube(2_0) = }")
print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
print(F"Sphere: {surface_area_sphere(2_0) = }")
print(F"Hemisphere: {surface_area_hemisphere(2_0) = }")
print(F"Cone: {surface_area_cone(1_0, 2_0) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
print(F"Torus: {surface_area_torus(2_0, 1_0) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
print(F"Square: {area_reg_polygon(4, 1_0) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
| 76
| 0
|
'''simple docstring'''
def _snake_case ( A ) -> List[str]:
if p < 2:
raise ValueError('''p should not be less than 2!''' )
elif p == 2:
return True
lowerCAmelCase__ = 4
lowerCAmelCase__ = (1 << p) - 1
for _ in range(p - 2 ):
lowerCAmelCase__ = ((s * s) - 2) % m
return s == 0
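# --- Illustrative sketch (added; not part of the original file) ---
# Lucas-Lehmer: M_p = 2**p - 1 is prime iff s_(p-2) == 0 (mod M_p) for
# s_0 = 4, s_(k+1) = s_k**2 - 2. Worked for p = 5 (M_5 = 31):
# 4 -> 14 -> 194 % 31 = 8 -> 62 % 31 = 0, so 31 is a Mersenne prime.
_s, _m = 4, (1 << 5) - 1
for _ in range(5 - 2):
    _s = (_s * _s - 2) % _m
assert _s == 0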
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 707
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
__UpperCAmelCase = logging.get_logger(__name__)
# General docstring
__UpperCAmelCase = '''RegNetConfig'''
# Base docstring
__UpperCAmelCase = '''facebook/regnet-y-040'''
__UpperCAmelCase = [1, 1_088, 7, 7]
# Image classification docstring
__UpperCAmelCase = '''facebook/regnet-y-040'''
__UpperCAmelCase = '''tabby, tabby cat'''
__UpperCAmelCase = [
'''facebook/regnet-y-040''',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 3 , lowerCamelCase_ = 1 , lowerCamelCase_ = 1 , lowerCamelCase_ = "relu" , ) -> int:
super().__init__()
lowerCAmelCase__ = nn.Convad(
lowerCamelCase_ , lowerCamelCase_ , kernel_size=lowerCamelCase_ , stride=lowerCamelCase_ , padding=kernel_size // 2 , groups=lowerCamelCase_ , bias=lowerCamelCase_ , )
lowerCAmelCase__ = nn.BatchNormad(lowerCamelCase_ )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else nn.Identity()
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Dict:
lowerCAmelCase__ = self.convolution(lowerCamelCase_ )
lowerCAmelCase__ = self.normalization(lowerCamelCase_ )
lowerCAmelCase__ = self.activation(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[Any]:
super().__init__()
lowerCAmelCase__ = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowerCAmelCase__ = config.num_channels
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
lowerCAmelCase__ = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
lowerCAmelCase__ = self.embedder(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 ) -> Any:
super().__init__()
lowerCAmelCase__ = nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , stride=lowerCamelCase_ , bias=lowerCamelCase_ )
lowerCAmelCase__ = nn.BatchNormad(lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Tensor:
lowerCAmelCase__ = self.convolution(lowerCamelCase_ )
lowerCAmelCase__ = self.normalization(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ ) -> List[Any]:
super().__init__()
lowerCAmelCase__ = nn.AdaptiveAvgPoolad((1, 1) )
lowerCAmelCase__ = nn.Sequential(
nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 ) , nn.Sigmoid() , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
# b c h w -> b c 1 1
lowerCAmelCase__ = self.pooler(lowerCamelCase_ )
lowerCAmelCase__ = self.attention(lowerCamelCase_ )
lowerCAmelCase__ = hidden_state * attention
return hidden_state
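# --- Illustrative note (added; not part of the original file) ---
# This is a squeeze-and-excitation block: global average pooling squeezes
# each channel to a scalar, the two 1x1 convolutions with a ReLU form the
# bottleneck, and the sigmoid output rescales the input channel-wise.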
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = hidden_state
lowerCAmelCase__ = self.layer(lowerCamelCase_ )
lowerCAmelCase__ = self.shortcut(lowerCamelCase_ )
hidden_state += residual
lowerCAmelCase__ = self.activation(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
RegNetShortCut(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ ) if should_apply_shortcut else nn.Identity()
)
lowerCAmelCase__ = nn.Sequential(
RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , groups=lowerCamelCase_ , activation=config.hidden_act ) , RegNetSELayer(lowerCamelCase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowerCamelCase_ , lowerCamelCase_ , kernel_size=1 , activation=lowerCamelCase_ ) , )
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> Union[str, Any]:
lowerCAmelCase__ = hidden_state
lowerCAmelCase__ = self.layer(lowerCamelCase_ )
lowerCAmelCase__ = self.shortcut(lowerCamelCase_ )
hidden_state += residual
lowerCAmelCase__ = self.activation(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 2 , lowerCamelCase_ = 2 , ) -> Dict:
super().__init__()
lowerCAmelCase__ = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
lowerCAmelCase__ = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , stride=lowerCamelCase_ , ) , *[layer(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) for _ in range(depth - 1 )] , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> List[str]:
lowerCAmelCase__ = self.layers(lowerCamelCase_ )
return hidden_state
class a__ ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[int]:
super().__init__()
lowerCAmelCase__ = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowerCamelCase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCamelCase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , depth=lowerCamelCase_ ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = False , lowerCamelCase_ = True ) -> BaseModelOutputWithNoAttention:
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(lowerCamelCase_ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase_ , hidden_states=lowerCamelCase_ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : List[Any] = RegNetConfig
lowercase__ : Tuple = "regnet"
lowercase__ : List[str] = "pixel_values"
lowercase__ : Tuple = True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> int:
if isinstance(lowerCamelCase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCamelCase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_=False ) -> int:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = value
__UpperCAmelCase = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
__UpperCAmelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , a__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[int]:
super().__init__(lowerCamelCase_ )
lowerCAmelCase__ = config
lowerCAmelCase__ = RegNetEmbeddings(lowerCamelCase_ )
lowerCAmelCase__ = RegNetEncoder(lowerCamelCase_ )
lowerCAmelCase__ = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(lowerCamelCase_ )
lowerCAmelCase__ = self.encoder(
lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(lowerCamelCase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCamelCase_ , pooler_output=lowerCamelCase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , a__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a__ ( a__ ):
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> Optional[Any]:
super().__init__(lowerCamelCase_ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = RegNetModel(lowerCamelCase_ )
# classification head
lowerCAmelCase__ = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = None , ) -> ImageClassifierOutputWithNoAttention:
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ , return_dict=lowerCamelCase_ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier(lowerCamelCase_ )
lowerCAmelCase__ = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowerCAmelCase__ = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowerCAmelCase__ = '''single_label_classification'''
else:
lowerCAmelCase__ = '''multi_label_classification'''
if self.config.problem_type == "regression":
lowerCAmelCase__ = MSELoss()
if self.num_labels == 1:
lowerCAmelCase__ = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowerCAmelCase__ = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
elif self.config.problem_type == "single_label_classification":
lowerCAmelCase__ = CrossEntropyLoss()
lowerCAmelCase__ = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowerCAmelCase__ = BCEWithLogitsLoss()
lowerCAmelCase__ = loss_fct(lowerCamelCase_ , lowerCamelCase_ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase_ , logits=lowerCamelCase_ , hidden_states=outputs.hidden_states )
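# --- Illustrative note (added; not part of the original file) ---
# The classification head infers its loss from `config.problem_type`:
# MSE for regression, cross-entropy for single-label classification, and
# BCE-with-logits for multi-label classification, falling back on
# `num_labels` and the label dtype when the problem type is unset.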
| 98
| 0
|
"""simple docstring"""
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = credit_card_number
_UpperCAmelCase = 0
_UpperCAmelCase = len(lowercase ) - 2
for i in range(lowercase ,-1 ,-2 ):
# double the value of every second digit
_UpperCAmelCase = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
_UpperCAmelCase = cc_number[:i] + str(lowercase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowercase ) - 1 ,-1 ,-2 ):
total += int(cc_number[i] )
return total % 10 == 0
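# --- Illustrative note (added; not part of the original file) ---
# Worked Luhn check on 4111111111111111: the leading 4 doubles to 8,
# seven of the 1s double to 2 (contributing 14), and the remaining eight
# 1s stay, so the checksum is 8 + 14 + 8 = 30, divisible by 10 -> valid.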
def __UpperCAmelCase ( lowercase ):
"""simple docstring"""
_UpperCAmelCase = f'''{credit_card_number} is an invalid credit card number because'''
if not credit_card_number.isdigit():
print(f'''{error_message} it has nonnumerical characters.''' )
return False
if not 13 <= len(lowercase ) <= 16:
print(f'''{error_message} of its length.''' )
return False
if not validate_initial_digits(lowercase ):
print(f'''{error_message} of its first two digits.''' )
return False
if not luhn_validation(lowercase ):
print(f'''{error_message} it fails the Luhn check.''' )
return False
print(f'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 277
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : str=2 , __lowerCAmelCase : List[Any]=8 , __lowerCAmelCase : Dict=True , __lowerCAmelCase : List[Any]=True , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : str=True , __lowerCAmelCase : Optional[Any]=99 , __lowerCAmelCase : Optional[Any]=16 , __lowerCAmelCase : Optional[int]=5 , __lowerCAmelCase : Optional[int]=2 , __lowerCAmelCase : Optional[int]=36 , __lowerCAmelCase : Any="gelu" , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : int=0.0 , __lowerCAmelCase : List[str]=512 , __lowerCAmelCase : List[str]=16 , __lowerCAmelCase : int=2 , __lowerCAmelCase : Any=0.02 , __lowerCAmelCase : List[str]=3 , __lowerCAmelCase : str=4 , __lowerCAmelCase : Dict=None , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : Any ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : int ):
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = 300
return config
def lowerCAmelCase_ ( self : Any ):
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = self.prepare_config_and_inputs()
_UpperCAmelCase = True
_UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = MraModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
_UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Tuple , __lowerCAmelCase : Dict , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , ):
_UpperCAmelCase = True
_UpperCAmelCase = MraModel(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , encoder_attention_mask=__lowerCAmelCase , )
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase , )
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : str ):
_UpperCAmelCase = MraForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : int , __lowerCAmelCase : Dict , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : int ):
_UpperCAmelCase = MraForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : int ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MraForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[int] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MraForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str , __lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MraForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : str = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_snake_case : Dict = False
_snake_case : Any = False
_snake_case : List[str] = False
_snake_case : Tuple = False
_snake_case : int = ()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = MraModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : Tuple ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCAmelCase )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = MraModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
@unittest.skip(reason="""MRA does not output attentions""" )
def lowerCAmelCase_ ( self : str ):
return
@require_torch
class a ( unittest.TestCase ):
@slow
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
_UpperCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : str ):
_UpperCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
_UpperCAmelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = 5_0265
_UpperCAmelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
@slow
def lowerCAmelCase_ ( self : Dict ):
_UpperCAmelCase = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
_UpperCAmelCase = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
_UpperCAmelCase = model(__lowerCAmelCase )[0]
_UpperCAmelCase = 5_0265
_UpperCAmelCase = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __lowerCAmelCase )
_UpperCAmelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCAmelCase , atol=1e-4 ) )
| 277
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718
|
import math
UpperCamelCase = 1_0
UpperCamelCase = 7
UpperCamelCase = BALLS_PER_COLOUR * NUM_COLOURS
def _a ( lowerCamelCase__ = 20 ) -> str:
lowerCamelCase_ : List[str] = math.comb(lowerCamelCase__ , lowerCamelCase__ )
lowerCamelCase_ : int = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCamelCase__ )
lowerCamelCase_ : int = NUM_COLOURS * (1 - missing_colour / total)
return F'{result:.9f}'
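# --- Illustrative note (added; not part of the original file) ---
# Linearity of expectation: a given colour is entirely absent from the 20
# drawn balls with probability C(60, 20) / C(70, 20), so the expected
# number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)).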
if __name__ == "__main__":
print(solution(2_0))
| 144
| 0
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Optional[int] = 'EncodecFeatureExtractor'
lowerCamelCase : Any = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.feature_extractor
__lowerCamelCase : List[str] = False
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True ) -> List[Any]:
return self.tokenizer.get_decoder_prompt_ids(task=SCREAMING_SNAKE_CASE_ , language=SCREAMING_SNAKE_CASE_ , no_timestamps=SCREAMING_SNAKE_CASE_ )
def __call__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = kwargs.pop('audio' , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = kwargs.pop('sampling_rate' , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = kwargs.pop('text' , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
__lowerCamelCase : Union[str, Any] = args[0]
__lowerCamelCase : List[str] = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if text is not None:
__lowerCamelCase : List[Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if audio is not None:
__lowerCamelCase : Any = self.feature_extractor(SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
__lowerCamelCase : Any = audio_inputs['input_values']
if "padding_mask" in audio_inputs:
__lowerCamelCase : List[Any] = audio_inputs['padding_mask']
return inputs
def lowercase_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : int = kwargs.pop('audio' , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = kwargs.pop('padding_mask' , SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
__lowerCamelCase : Union[str, Any] = args[0]
__lowerCamelCase : Any = args[1:]
if audio_values is not None:
return self._decode_audio(SCREAMING_SNAKE_CASE_ , padding_mask=SCREAMING_SNAKE_CASE_ )
else:
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> int:
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ) -> List[np.ndarray]:
__lowerCamelCase : Optional[Any] = to_numpy(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = audio_values.shape
if padding_mask is None:
return list(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = to_numpy(SCREAMING_SNAKE_CASE_ )
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
__lowerCamelCase : int = seq_len - padding_mask.shape[-1]
__lowerCamelCase : Union[str, Any] = 1 - self.feature_extractor.padding_value
__lowerCamelCase : str = np.pad(SCREAMING_SNAKE_CASE_ , ((0, 0), (0, difference)) , 'constant' , constant_values=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = audio_values.tolist()
for i in range(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : str = np.asarray(audio_values[i] )[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
__lowerCamelCase : int = sliced_audio.reshape(SCREAMING_SNAKE_CASE_ , -1 )
return audio_values
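# --- Illustrative note (added; not part of the original file) ---
# `_decode_audio` pads the mask with the *non-padding* value so samples
# generated beyond the original mask length are kept, then slices each
# batch item back to its true length.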
| 13
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _snake_case ( A , A , A , A , A ) -> np.array:
lowerCAmelCase__ = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ = np.zeros((n + 1,) )
lowerCAmelCase__ = ya
lowerCAmelCase__ = xa
for k in range(A ):
lowerCAmelCase__ = y[k] + step_size * ode_func(A , y[k] )
lowerCAmelCase__ = y[k] + (
(step_size / 2) * (ode_func(A , y[k] ) + ode_func(x + step_size , A ))
)
x += step_size
return y
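# --- Illustrative sketch (added; not part of the original file) ---
# Heun's method (the improved Euler scheme above) on y' = y, y(0) = 1
# over [0, 1]: each step averages the slope at the start with the slope
# at the Euler predictor. With h = 0.1 the result is close to e ~ 2.71828;
# `_heun_demo` is a made-up helper.
def _heun_demo(step: float = 0.1) -> float:
    x, y = 0.0, 1.0
    while x < 1.0 - 1e-12:
        euler = y + step * y  # predictor slope, since f(x, y) = y
        y = y + (step / 2) * (y + euler)  # corrector averages the slopes
        x += step
    return y

assert abs(_heun_demo() - 2.71828) < 0.01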
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90
| 0
|
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 703
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 25
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class __lowercase (a_ , a_ ):
_lowerCamelCase = '''resnet'''
_lowerCamelCase = ['''basic''', '''bottleneck''']
def __init__( self : int , UpperCAmelCase_ : List[Any]=3 , UpperCAmelCase_ : Dict=64 , UpperCAmelCase_ : Any=[256, 512, 1_024, 2_048] , UpperCAmelCase_ : int=[3, 4, 6, 3] , UpperCAmelCase_ : Dict="bottleneck" , UpperCAmelCase_ : int="relu" , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=None , **UpperCAmelCase_ : Optional[Any] , ):
super().__init__(**lowercase_)
if layer_type not in self.layer_types:
raise ValueError(F'layer_type={layer_type} is not one of {",".join(self.layer_types)}')
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : Optional[Any] = embedding_size
UpperCamelCase__ : List[Any] = hidden_sizes
UpperCamelCase__ : Any = depths
UpperCamelCase__ : Union[str, Any] = layer_type
UpperCamelCase__ : List[str] = hidden_act
UpperCamelCase__ : Tuple = downsample_in_first_stage
UpperCamelCase__ : int = ['stem'] + [F'stage{idx}' for idx in range(1 , len(lowercase_) + 1)]
UpperCamelCase__, UpperCamelCase__ : Dict = get_aligned_output_features_output_indices(
out_features=lowercase_ , out_indices=lowercase_ , stage_names=self.stage_names)
class __lowercase (a_ ):
_lowerCamelCase = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self : Tuple):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def __UpperCamelCase ( self : List[Any]):
return 1e-3
| 596
|
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase_ = logging.getLogger()
def lowerCamelCase ( a_ ) -> Optional[int]:
lowerCAmelCase_ = {}
lowerCAmelCase_ = os.path.join(a_ , 'all_results.json' )
if os.path.exists(a_ ):
with open(a_ , 'r' ) as f:
lowerCAmelCase_ = json.load(a_ )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
lowerCamelCase_ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class a_ ( a_ ):
'''simple docstring'''
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
import xla_spawn
lowerCAmelCase_ = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowercase_ , 'argv' , lowercase_ ):
lowerCAmelCase_ = time()
xla_spawn.main()
lowerCAmelCase_ = time()
lowerCAmelCase_ = get_results(lowercase_ )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 5_0_0 )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
import xla_spawn
lowerCAmelCase_ = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(lowercase_ , 'argv' , lowercase_ ):
xla_spawn.main()
| 318
| 0
|
'''simple docstring'''
import requests
UpperCamelCase__ : Dict = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''
def __UpperCamelCase( _A : List[str] ):
'''simple docstring'''
# fetching a list of articles in json format
UpperCAmelCase__ : Any = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'''{i}.) {article['title']}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 711
|
'''simple docstring'''
def __UpperCamelCase( _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : int = len(_A )
UpperCAmelCase__ : int = len(_A )
UpperCAmelCase__ : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
UpperCAmelCase__ : list = []
for char_count in range(_A ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(_A )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 496
| 0
|
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Any = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def a_ ( __lowercase : str , __lowercase : str ) -> Dict:
_snake_case = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
_snake_case = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
_snake_case = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowercase__ , output_all_encodings=lowercase__ , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowercase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
_snake_case = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), 'models')
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=False, use_decoder=False, )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.0_2,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowercase__ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma')

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias')
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight')
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias')
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight')
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias')
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight')

        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias')
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight')
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta')
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma')

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias')
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight')

        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias')
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight')
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta')
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma')
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print('✔️ Both model do output the same tensors')
    else:
        print('❌ Both model do **NOT** output the same tensors')
        print('Absolute difference is:', max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
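
# Example invocation (illustrative; the script filename and both paths are placeholders):
#   python convert_bort_checkpoint.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path /path/to/hf_output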
| 686
|
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        """simple docstring"""
        # TRANSFORMERS_OFFLINE can only be changed before `transformers` is loaded,
        # so the actual check runs in a child interpreter
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
        """simple docstring"""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """simple docstring"""
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        """simple docstring"""
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode", result.stderr.decode().replace("\n", ""), )
@require_torch
    def test_offline_model_dynamic_model(self):
        """simple docstring"""
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 134
| 0
|
"""simple docstring"""
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    '''simple docstring'''
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    '''simple docstring'''
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
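
# Illustrative check on the graphs above: the cheapest E -> F route is
# E -> G -> F with total weight 3 (E -> B -> C -> D -> F would cost 4), so
#   bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3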
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133
|
"""simple docstring"""
import os
import pytest
from attr import dataclass
_A = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
    def metric_definitions(self) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
return F"""{self.framework}-transfromers-test"""
@property
    def test_path(self) -> str:
return F"""./tests/sagemaker/scripts/{self.framework}"""
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def sm_env(request):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
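
# Typical use (illustrative): a test class sets `framework = "pytorch"` or
# `framework = "tensorflow"` and requests the `sm_env` fixture, which attaches
# the environment to the class as `cls.env`.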
| 133
| 1
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__a : List[Any] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
"""simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """simple docstring"""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    '''simple docstring'''

    vocab_size = 99
    def _get_config_and_data(self):
        """simple docstring"""
        input_ids = np.array(
            [
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        """simple docstring"""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        """simple docstring"""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        """simple docstring"""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
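
        # Intuition (illustrative): shift_tokens_right moves every token one slot to
        # the right and writes the decoder start token (here id 2) into column 0, so
        # exactly one pad token (id 1) is consumed: n_pad_after == n_pad_before - 1.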
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    '''simple docstring'''

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        """simple docstring"""
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        """simple docstring"""
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 637
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/realm-cc-news-pretrained-embedder": (
"https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-encoder": (
"https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-scorer": (
"https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"
),
"google/realm-cc-news-pretrained-openqa": (
"https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"
),
"google/realm-orqa-nq-openqa": "https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json",
"google/realm-orqa-nq-reader": "https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json",
"google/realm-orqa-wq-openqa": "https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json",
"google/realm-orqa-wq-reader": "https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
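
# Illustrative usage (not part of the original file): instantiating with defaults
# mirrors the google/realm-cc-news-pretrained-* checkpoints.
#   config = RealmConfig()
#   assert config.retriever_proj_size == 128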
| 637
| 1
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    '''simple docstring'''

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE : Optional[int] = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = dict(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[Any] = 1
SCREAMING_SNAKE_CASE : int = FrozenDict(UpperCAmelCase_ )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE : Any = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , UpperCAmelCase_ , standard_warn=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : int = dict(scheduler.config )
SCREAMING_SNAKE_CASE : List[str] = True
SCREAMING_SNAKE_CASE : List[Any] = FrozenDict(UpperCAmelCase_ )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, )
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)
    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
@torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use CLIPSeg to compute a soft mask for the region described by `text`
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor, )
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, )
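
# Sketch of intended usage (illustrative; how the pipeline is instantiated depends
# on your setup, e.g. loading it as a diffusers community pipeline):
#   pipe = ...  # a TextInpainting instance with all modules wired up
#   result = pipe(prompt="a red couch", image=init_image, text="the old couch")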
| 488
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''

    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
@is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
@is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
@is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ), )
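
        # All three call tests assert the same (batch, channels, height, width)
        # output contract; only the input container (PIL, NumPy, PyTorch) differs.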
| 488
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
'''simple docstring'''
    def setUp(self):
        """simple docstring"""
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        """simple docstring"""
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        """simple docstring"""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        """simple docstring"""
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        """simple docstring"""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
| 108
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class lowerCAmelCase_ ( __magic_name__ ):
__lowerCamelCase : Any = "falcon"
__lowerCamelCase : List[str] = ["past_key_values"]
def __init__( self , _lowerCAmelCase=65024 , _lowerCAmelCase=4544 , _lowerCAmelCase=32 , _lowerCAmelCase=71 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=11 , _lowerCAmelCase=11 , **_lowerCAmelCase , ) -> Union[str, Any]:
_lowerCAmelCase = vocab_size
# Backward compatibility with n_embed kwarg
_lowerCAmelCase = kwargs.pop("n_embed" , _lowerCAmelCase )
_lowerCAmelCase = hidden_size if n_embed is None else n_embed
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = use_cache
_lowerCAmelCase = hidden_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = bos_token_id
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
_lowerCAmelCase = alibi
_lowerCAmelCase = new_decoder_architecture
_lowerCAmelCase = multi_query # Ignored when new_decoder_architecture is True
_lowerCAmelCase = parallel_attn
_lowerCAmelCase = bias
super().__init__(bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , **_lowerCAmelCase )
@property
def _snake_case ( self ) -> Optional[Any]:
return self.hidden_size // self.num_attention_heads
@property
def _snake_case ( self ) -> Optional[Any]:
return not self.alibi
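A minimal usage sketch for the config class above (editor's addition; assumes a transformers release that ships FalconConfig, and the keyword values are illustrative, not canonical defaults):

from transformers import FalconConfig

config = FalconConfig(num_kv_heads=8, new_decoder_architecture=True)
assert config.head_dim == config.hidden_size // config.num_attention_heads
print(config.rotary)  # True whenever alibi is disabled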
| 18
| 0
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
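For context, a sketch of the round trip the tests above exercise (editor's addition; it downloads the laion/clap-htsat-unfused checkpoint, so it is not part of the unit tests):

import numpy as np
from transformers import ClapProcessor

processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
inputs = processor(
    text=["a dog barking"],
    audios=[np.zeros(48_000, dtype=np.float32)],  # one second of silence at 48 kHz
    return_tensors="np",
)
print(sorted(inputs.keys()))  # tokenizer keys plus the feature extractor's audio features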
| 446
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
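The _LazyModule indirection above defers the heavy tokenizer imports until an attribute is first accessed. A sketch of what that means for callers (editor's addition; assumes transformers is installed):

import importlib

module = importlib.import_module("transformers.models.layoutxlm")
# No tokenizer code has been imported yet; this attribute access triggers it.
processor_cls = module.LayoutXLMProcessor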
| 446
| 1
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
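A standalone loop mirroring full_loop() above, outside the test harness (editor's sketch; the zero residual stands in for a real model prediction and the shapes are illustrative):

import torch
from diffusers import IPNDMScheduler

scheduler = IPNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample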
| 367
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
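PNDM runs a Runge-Kutta warm-up (step_prk) before switching to the linear multistep phase (step_plms), which is why full_loop() above iterates two timestep lists. A standalone sketch of that split (editor's addition; shapes and the zero model output are illustrative):

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.prk_timesteps:
    sample = scheduler.step_prk(torch.zeros_like(sample), t, sample).prev_sample
for t in scheduler.plms_timesteps:
    sample = scheduler.step_plms(torch.zeros_like(sample), t, sample).prev_sample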
| 367
| 1
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __UpperCamelCase ( ):
A_ : str = HfArgumentParser(snake_case__ )
A_ : str = parser.parse_args_into_dataclasses()[0]
A_ : Any = TensorFlowBenchmark(args=snake_case__ )
try:
A_ : List[str] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
A_ : Dict = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
A_ : Optional[int] = """ """.join(str(snake_case__ ).split(""" """ )[:-1] )
A_ : Optional[Any] = """"""
A_ : Optional[int] = eval(str(snake_case__ ).split(""" """ )[-1] )
A_ : Union[str, Any] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(snake_case__ )
if len(snake_case__ ) > 0:
A_ : Optional[Any] = full_error_msg + begin_error_msg + str(snake_case__ )
raise ValueError(snake_case__ )
benchmark.run()
if __name__ == "__main__":
main()
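The same benchmark can also be driven programmatically instead of via CLI flags (editor's sketch; requires TensorFlow, and the model name and sizes are illustrative):

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
)
benchmark = TensorFlowBenchmark(args=args)
results = benchmark.run()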
| 480
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 480
| 1
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
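Why the add_prefix_space handling above matters: a byte-level pre-tokenizer encodes "word" and " word" differently, so the flag stored in the tokenizer state must match the runtime argument. A quick illustration (editor's sketch; the checkpoint name is illustrative):

from transformers import BloomTokenizerFast

tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
print(tok("Hello")["input_ids"])   # token for "Hello"
print(tok(" Hello")["input_ids"])  # different id: the space is part of the byte-level token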
| 260
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 260
| 1
|
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__UpperCamelCase : Optional[Any] = (0, 0)
__UpperCamelCase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__UpperCamelCase : str = time.time()
__UpperCamelCase : Any = AStar(init, goal)
__UpperCamelCase : Optional[Any] = a_star.search()
__UpperCamelCase : Union[str, Any] = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
__UpperCamelCase : Tuple = time.time()
__UpperCamelCase : Any = BidirectionalAStar(init, goal)
__UpperCamelCase : int = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
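The HEURISTIC flag at the top of this module is what calculate_heuristic() switches on; a tiny numeric illustration of the two distance measures (editor's addition):

from math import sqrt

dx, dy = 3, 4
print(abs(dx) + abs(dy))    # Manhattan distance: 7
print(sqrt(dx**2 + dy**2))  # Euclidean distance: 5.0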
| 712
|
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , UpperCamelCase ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
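End users normally reach this builder through load_dataset, with the CsvConfig fields above exposed as keyword arguments (editor's sketch; the file path is illustrative):

from datasets import load_dataset

dataset = load_dataset("csv", data_files={"train": "my_data.csv"}, sep=";", skiprows=1)
print(dataset["train"].features)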
| 372
| 0
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=self.out_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsampleaD(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)
        return hidden_states
class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
        ]
        attentions = []
        for _ in range(self.num_layers):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels, n_heads=self.num_attention_heads, d_head=self.in_channels // self.num_attention_heads, depth=1, use_linear_projection=self.use_linear_projection, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype
            )
            attentions.append(attn_block)
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels, out_channels=self.in_channels, dropout_prob=self.dropout, dtype=self.dtype
            )
            resnets.append(res_block)
        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
        return hidden_states
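A shape sketch for the plain down block above (editor's addition; it assumes the relative imports at the top of this file resolve, and all sizes are illustrative):

import jax
import jax.numpy as jnp

block = FlaxDownBlock2D(in_channels=32, out_channels=64)
hidden = jnp.zeros((1, 16, 16, 32))  # NHWC layout
temb = jnp.zeros((1, 1280))
params = block.init(jax.random.PRNGKey(0), hidden, temb)
out, skips = block.apply(params, hidden, temb)  # `skips` feed the matching up blocks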
| 80
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
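A sketch of driving the NER task above on a CoNLL-style directory (editor's addition; the data path is illustrative and utils_ner must be importable):

from utils_ner import Split

task = NER()
examples = task.read_examples_from_file("./data", Split.train)
print(examples[0].words[:5], examples[0].labels[:5])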
| 431
| 0
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """
    Get images list and annotations list from input dir.
    Update new images and annotations.
    Save images and annotations in output dir.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def lowercase_ ( _lowerCamelCase: list , _lowerCamelCase: list , _lowerCamelCase: list[int] , _lowerCamelCase: tuple[int, int] , _lowerCamelCase: tuple[float, float] , _lowerCamelCase: float = 0.0 , ) -> tuple[list, list, str]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
__lowerCamelCase : Optional[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__lowerCamelCase : List[str] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__lowerCamelCase : int = int(scale_x * output_size[1] )
__lowerCamelCase : Optional[Any] = int(scale_y * output_size[0] )
__lowerCamelCase : List[Any] = []
__lowerCamelCase : Optional[Any] = []
for i, index in enumerate(_lowerCamelCase ):
__lowerCamelCase : List[str] = all_img_list[index]
path_list.append(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = all_annos[index]
__lowerCamelCase : List[str] = cva.imread(_lowerCamelCase )
if i == 0: # top-left
__lowerCamelCase : List[str] = cva.resize(_lowerCamelCase , (divid_point_x, divid_point_y) )
__lowerCamelCase : Any = img
for bbox in img_annos:
__lowerCamelCase : str = bbox[1] * scale_x
__lowerCamelCase : Union[str, Any] = bbox[2] * scale_y
__lowerCamelCase : Optional[int] = bbox[3] * scale_x
__lowerCamelCase : Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__lowerCamelCase : str = cva.resize(_lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
__lowerCamelCase : Any = img
for bbox in img_annos:
__lowerCamelCase : List[Any] = scale_x + bbox[1] * (1 - scale_x)
__lowerCamelCase : List[Any] = bbox[2] * scale_y
__lowerCamelCase : Tuple = scale_x + bbox[3] * (1 - scale_x)
__lowerCamelCase : Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__lowerCamelCase : Any = cva.resize(_lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
__lowerCamelCase : List[str] = img
for bbox in img_annos:
__lowerCamelCase : Any = bbox[1] * scale_x
__lowerCamelCase : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
__lowerCamelCase : Dict = bbox[3] * scale_x
__lowerCamelCase : Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__lowerCamelCase : int = cva.resize(
_lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__lowerCamelCase : Optional[Any] = img
for bbox in img_annos:
__lowerCamelCase : Union[str, Any] = scale_x + bbox[1] * (1 - scale_x)
__lowerCamelCase : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
__lowerCamelCase : int = scale_x + bbox[3] * (1 - scale_x)
__lowerCamelCase : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
__lowerCamelCase : str = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowercase_ ( _lowerCamelCase: int ) -> str:
'''simple docstring'''
assert number_char > 1, "The number of character should greater than 1"
__lowerCamelCase : Tuple = ascii_lowercase + digits
return "".join(random.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
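# --- Worked example (not part of the original script) ------------------------
# get_dataset() turns YOLO lines (class x_center y_center width height, all
# normalized) into corner boxes, and main() converts them back. A quick,
# self-contained check of that round trip on a made-up box; call
# _yolo_round_trip_demo() ad hoc to run it.
def _yolo_round_trip_demo() -> None:
    from math import isclose

    cls_id, x_c, y_c, w, h = 0, 0.5, 0.5, 0.2, 0.4
    # YOLO center/size -> corner form (as in get_dataset)
    xmin, ymin = x_c - w / 2, y_c - h / 2
    xmax, ymax = x_c + w / 2, y_c + h / 2
    # Corner form -> YOLO center/size (as in main)
    width, height = xmax - xmin, ymax - ymin
    x_center, y_center = xmin + width / 2, ymin + height / 2
    assert isclose(width, w) and isclose(height, h)
    assert isclose(x_center, x_c) and isclose(y_center, y_c)
    print(f"class {cls_id}: ({xmin:.2f}, {ymin:.2f}) -> ({xmax:.2f}, {ymax:.2f})")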
| 366
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

_model_names = [
    "small",
    "small-base",
    "medium",
    "medium-base",
    "intermediate",
    "intermediate-base",
    "large",
    "large-base",
    "xlarge",
    "xlarge-base",
]

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
__A = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
__A = {F"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) WordPiece tokenizer for Funnel Transformer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        # Rebuild the backend normalizer if its settings disagree with the
        # requested lowercasing / accent-stripping / Chinese-character handling.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
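# --- Usage sketch (not part of the original module) ---------------------------
# Funnel gives the leading <cls> token its own token type id (2, set through
# cls_token_type_id above) rather than BERT's 0. A minimal illustration; it
# assumes the "funnel-transformer/small" checkpoint is reachable on the
# Hugging Face Hub.
if __name__ == "__main__":
    tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
    encoded = tokenizer("Hello world", "How are you?")
    print(encoded["input_ids"])
    print(encoded["token_type_ids"])  # starts with 2 for <cls>, then 0s and 1s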
| 366
| 1
|