| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
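# --- Added usage sketch (not part of the original script) ---
# A minimal sketch of loading the converted pipeline afterwards; the local path
# "./unclip-image-variation" is a hypothetical value passed as --dump_path, and
# "input.png" is a placeholder input image.
#
# from PIL import Image
# from diffusers import UnCLIPImageVariationPipeline
#
# pipe = UnCLIPImageVariationPipeline.from_pretrained("./unclip-image-variation")
# init_image = Image.open("input.png").convert("RGB")
# variations = pipe(image=init_image).images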
| 672
|
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device


# Disable TF32 so CUDA matmuls are bit-reproducible across runs.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 672
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
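# --- Added sketch (not part of the original example) ---
# A minimal, self-contained illustration of the `find_executable_batch_size`
# decorator used above: the wrapped function is first called with
# `starting_batch_size`, and whenever a CUDA out-of-memory error escapes it,
# the call is retried with the batch size halved. `pretend_train` below is a
# hypothetical stand-in for the real inner training loop.


@find_executable_batch_size(starting_batch_size=128)
def pretend_train(batch_size):
    # A real loop would build dataloaders and train here; this stub only
    # reports which batch size the decorator settled on.
    print(f"running with batch_size={batch_size}")


pretend_train()  # called with *no* arguments; the decorator injects batch_size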
| 708
|
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r'''
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `"default"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `"sigmoid"`: Applies the sigmoid function on the output.
- `"softmax"`: Applies the softmax function on the output.
- `"none"`: Does not apply any function on the output.
''',
)
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated, if you want similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )

            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result

    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
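# --- Added usage sketch (not part of the original module) ---
# How this class is typically reached through the `pipeline` factory; the
# checkpoint below is the library's usual default for this task.
#
# from transformers import pipeline
#
# classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
# print(classifier("This movie was great!"))               # [{'label': 'POSITIVE', 'score': ...}]
# print(classifier("This movie was great!", top_k=None))   # one dict per label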
| 607
| 0
|
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }

    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 35
|
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
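# --- Added usage sketch (not part of the original tests) ---
# The API exercised above, in isolation: load the pipeline, pick a k-diffusion
# sampler by name, and generate. Assumes a CUDA device is available.
#
# from diffusers import StableDiffusionKDiffusionPipeline
#
# pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
# pipe.set_scheduler("sample_euler")  # any sampler name exposed by k-diffusion
# image = pipe("A painting of a squirrel eating a burger", num_inference_steps=20).images[0]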
| 292
| 0
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
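# --- Added usage sketch (not part of the original module) ---
# Minimal illustration of the parser defined above: declare options as a
# dataclass, then parse the command line into a typed instance. The dataclass
# and its fields are hypothetical.
#
# from dataclasses import dataclass, field
#
# @dataclass
# class TrainingOptions:
#     learning_rate: float = field(default=3e-4, metadata={"help": "Optimizer learning rate."})
#     do_eval: bool = field(default=False, metadata={"help": "Whether to run evaluation."})
#
# (options,) = HfArgumentParser(TrainingOptions).parse_args_into_dataclasses()
# print(options.learning_rate)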
| 709
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
    import torch

    from transformers import TimmBackbone, TimmBackboneConfig

from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_maps[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
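# --- Added usage sketch (not part of the original tests) ---
# The equivalence test above compares these two loading paths; in isolation,
# both return a backbone exposing `feature_maps` on forward and a `channels`
# attribute (downloads weights when run).
#
# from transformers import AutoBackbone
#
# timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True)
# hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18")
# assert timm_backbone.channels == hf_backbone.channels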
| 102
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
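# --- Added note (not part of the original file) ---
# What the `_LazyModule` indirection above buys, from the consumer side: the
# heavy torch submodule is imported only when one of its names is first touched.
#
# import transformers.models.nllb_moe as nllb_moe  # cheap: nothing heavy loaded yet
# config_cls = nllb_moe.NllbMoeConfig              # first access imports configuration_nllb_moe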
| 85
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
POKER_HANDS = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 14]),
("""2H 5D 3C AS 5S""", False, [14, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [14, 13, 12, 11, 10]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 23),
("""JH 9H TH KH QH""", 22),
("""JC KH JS JD JH""", 21),
("""KH KC 3S 3H 3D""", 20),
("""8C 9C 5C 3C TC""", 19),
("""JS QS 9H TS KH""", 18),
("""7C 7S KH 2H 7H""", 17),
("""3C KH 5D 5S KH""", 16),
("""QH 8H KD JH 8S""", 15),
("""2D 6D 9D TH 7D""", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", POKER_HANDS)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 54
| 0
|
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords: list[str]) -> None:
        # Node 0 is the root of the trie; each node stores its character,
        # child states, failure link, and the keywords ending at it.
        self.adlist: list[dict] = []
        self.adlist.append(
            {"value": "", "next_states": [], "fail_state": 0, "output": []}
        )

        for keyword in keywords:
            self.add_keyword(keyword)

        self.set_fail_transitions()

    def find_next_state(self, current_state: int, char: str) -> int | None:
        for state in self.adlist[current_state]["next_states"]:
            if char == self.adlist[state]["value"]:
                return state
        return None

    def add_keyword(self, keyword: str) -> None:
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        "value": character,
                        "next_states": [],
                        "fail_state": 0,
                        "output": [],
                    }
                )
                self.adlist[current_state]["next_states"].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]["output"].append(keyword)

    def set_fail_transitions(self) -> None:
        q: deque = deque()
        for node in self.adlist[0]["next_states"]:
            q.append(node)
            self.adlist[node]["fail_state"] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]["next_states"]:
                q.append(child)
                state = self.adlist[r]["fail_state"]
                while (
                    self.find_next_state(state, self.adlist[child]["value"]) is None
                    and state != 0
                ):
                    state = self.adlist[state]["fail_state"]
                self.adlist[child]["fail_state"] = self.find_next_state(
                    state, self.adlist[child]["value"]
                )
                if self.adlist[child]["fail_state"] is None:
                    self.adlist[child]["fail_state"] = 0
                self.adlist[child]["output"] = (
                    self.adlist[child]["output"]
                    + self.adlist[self.adlist[child]["fail_state"]]["output"]
                )

    def search_in(self, string: str) -> dict[str, list[int]]:
        result: dict = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]["fail_state"]
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
                for key in self.adlist[current_state]["output"]:
                    if key not in result:
                        result[key] = []
                    result[key].append(i - len(key) + 1)
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
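# --- Added usage example (not part of the original module) ---
# The automaton matches every keyword in a single left-to-right pass and
# reports the start index of each occurrence.
auto = Automaton(["what", "hat", "ver", "er"])
print(auto.search_in("whatever, err ... , wherever"))
# {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}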
| 717
|
from math import factorial
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))
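# --- Added worked example (not part of the original solution) ---
# 145 is the classic fixed point of this map: 1! + 4! + 5! = 1 + 24 + 120 = 145.
assert digit_factorial_sum(145) == 145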
def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 313
| 0
|
from __future__ import annotations
class XORCipher:
    def __init__(self, key: int = 0):
        # simple constructor that receives a key (or falls back to the default 0)
        self.__key = key

    def encrypt(self, content: str, key: int) -> list[str]:
        # encrypt 'content' with 'key' and return a list of chars;
        # falls back to the constructor key, then 1
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt(self, content: list[str], key: int) -> list[str]:
        # decrypt a list of XOR-ed chars; XOR with the same key is its own inverse
        assert isinstance(key, int) and isinstance(content, list)

        key = key or self.__key or 1

        # make sure key is an appropriate size
        key %= 255

        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def decrypt_string(self, content: str, key: int = 0) -> str:
        assert isinstance(key, int) and isinstance(content, str)

        key = key or self.__key or 1

        # make sure key can be any size
        while key > 255:
            key -= 255

        # This will be returned
        ans = ""

        for ch in content:
            ans += chr(ord(ch) ^ key)

        return ans

    def encrypt_file(self, file: str, key: int = 0) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
        except OSError:
            return False

        return True

    def decrypt_file(self, file: str, key: int) -> bool:
        assert isinstance(file, str) and isinstance(key, int)

        try:
            with open(file) as fin, open("decrypt.out", "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line, key))
        except OSError:
            return False

        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
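# A minimal standalone sketch of the XOR symmetry the class above relies on:
# applying the same key twice restores the plaintext (values illustrative).
# >>> data = "hallo welt"
# >>> key = 67
# >>> enc = "".join(chr(ord(ch) ^ key) for ch in data)
# >>> "".join(chr(ord(ch) ^ key) for ch in enc) == data
# True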
| 629
|
import numpy as np
import qiskit
def A_ ( _lowerCAmelCase = 8 , _lowerCAmelCase = None ) -> str:
UpperCamelCase : Tuple = np.random.default_rng(seed=_lowerCAmelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
UpperCamelCase : List[str] = 6 * key_len
# Measurement basis for Alice's qubits.
UpperCamelCase : List[Any] = rng.integers(2 , size=_lowerCAmelCase )
# The set of states Alice will prepare.
UpperCamelCase : List[Any] = rng.integers(2 , size=_lowerCAmelCase )
# Measurement basis for Bob's qubits.
UpperCamelCase : Optional[int] = rng.integers(2 , size=_lowerCAmelCase )
# Quantum Circuit to simulate BB84
UpperCamelCase : List[Any] = qiskit.QuantumCircuit(_lowerCAmelCase , name="BB84" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCAmelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCAmelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCAmelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCAmelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCAmelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
UpperCamelCase : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
UpperCamelCase : Tuple = qiskit.execute(_lowerCAmelCase , _lowerCAmelCase , shots=1 , seed_simulator=_lowerCAmelCase )
# Returns the result of measurement.
UpperCamelCase : Optional[Any] = job.result().get_counts(_lowerCAmelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
UpperCamelCase : Tuple = "".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
UpperCamelCase : Tuple = gen_key[:key_len] if len(_lowerCAmelCase ) >= key_len else gen_key.ljust(_lowerCAmelCase , "0" )
return key
if __name__ == "__main__":
print(f"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
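# A qiskit-free sketch of the sifting step above: only positions where Alice's
# and Bob's randomly chosen bases agree (about half on average) contribute to
# the key. The arrays below are illustrative, not output of an actual run.
# >>> alice_basis, bob_basis, measured = [0, 1, 1, 0], [0, 0, 1, 1], "1011"
# >>> "".join(m for a, b, m in zip(alice_basis, bob_basis, measured) if a == b)
# '11'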
| 629
| 1
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCAmelCase ( lowerCAmelCase, unittest.TestCase ):
'''simple docstring'''
__A = BarthezTokenizer
__A = BarthezTokenizerFast
__A = True
__A = True
def __UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
super().setUp()
_UpperCamelCase = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
tokenizer.save_pretrained(self.tmpdirname)
tokenizer.save_pretrained(self.tmpdirname , legacy_format=lowercase_)
_UpperCamelCase = tokenizer
def __UpperCAmelCase ( self : str) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = "<pad>"
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowercase_) , lowercase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowercase_) , lowercase_)
def __UpperCAmelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<s>")
self.assertEqual(vocab_keys[1] , "<pad>")
self.assertEqual(vocab_keys[-1] , "<mask>")
self.assertEqual(len(lowercase_) , 101122)
def __UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 101122)
@require_torch
def __UpperCAmelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCamelCase = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_UpperCamelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCamelCase = self.tokenizer(
lowercase_ , max_length=len(lowercase_) , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt")
self.assertIsInstance(lowercase_ , lowercase_)
self.assertEqual((2, 6) , batch.input_ids.shape)
self.assertEqual((2, 6) , batch.attention_mask.shape)
_UpperCamelCase = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_)
def __UpperCAmelCase ( self : str) -> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = "I was born in 92000, and this is falsé."
_UpperCamelCase = tokenizer.tokenize(lowercase_)
_UpperCamelCase = rust_tokenizer.tokenize(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
_UpperCamelCase = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
_UpperCamelCase = rust_tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowercase_)
_UpperCamelCase = rust_tokenizer.encode(lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
@slow
def __UpperCAmelCase ( self : Any) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCamelCase = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=lowercase_ , )
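# A minimal usage sketch of the tokenizer exercised above, assuming the public
# "moussaKam/mbarthez" checkpoint is reachable (token ids are not shown, since
# they depend on the checkpoint's vocabulary):
# >>> tok = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
# >>> batch = tok(["A long paragraph for summarization."], return_tensors="pt")
# >>> batch["input_ids"].shape[0]
# 1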
| 82
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
lowerCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : str = None , lowercase_ : uuid.UUID = None , lowercase_ : List[Any]=None , lowercase_ : int=None) -> Dict:
"""simple docstring"""
if not conversation_id:
            _UpperCamelCase = uuid.uuid4()
if past_user_inputs is None:
_UpperCamelCase = []
if generated_responses is None:
_UpperCamelCase = []
_UpperCamelCase = conversation_id
_UpperCamelCase = past_user_inputs
_UpperCamelCase = generated_responses
_UpperCamelCase = text
def __eq__( self : Optional[Any] , lowercase_ : Optional[Any]) -> List[Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def __UpperCAmelCase ( self : List[Any] , lowercase_ : str , lowercase_ : bool = False) -> Any:
"""simple docstring"""
if self.new_user_input:
if overwrite:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
f'with: "{text}".')
_UpperCamelCase = text
else:
logger.warning(
f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input')
else:
_UpperCamelCase = text
def __UpperCAmelCase ( self : Optional[int]) -> List[Any]:
"""simple docstring"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
_UpperCamelCase = None
def __UpperCAmelCase ( self : Dict , lowercase_ : str) -> Optional[Any]:
"""simple docstring"""
self.generated_responses.append(lowercase_)
def __UpperCAmelCase ( self : List[Any]) -> Optional[int]:
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any]) -> int:
"""simple docstring"""
_UpperCamelCase = f'Conversation id: {self.uuid} \n'
for is_user, text in self.iter_texts():
_UpperCamelCase = "user" if is_user else "bot"
output += f'{name} >> {text} \n'
return output
@add_end_docstrings(
lowerCAmelCase, R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''', )
class _UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
def __init__( self : List[Any] , *lowercase_ : Optional[Any] , **lowercase_ : str) -> List[str]:
"""simple docstring"""
super().__init__(*lowercase_ , **lowercase_)
if self.tokenizer.pad_token_id is None:
_UpperCamelCase = self.tokenizer.eos_token
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any]=None , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : str) -> Tuple:
"""simple docstring"""
_UpperCamelCase = {}
_UpperCamelCase = {}
_UpperCamelCase = {}
if min_length_for_response is not None:
_UpperCamelCase = min_length_for_response
if minimum_tokens is not None:
_UpperCamelCase = minimum_tokens
if "max_length" in generate_kwargs:
_UpperCamelCase = generate_kwargs["max_length"]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
_UpperCamelCase = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowercase_)
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , lowercase_ : Union[Conversation, List[Conversation]] , lowercase_ : str=0 , **lowercase_ : Union[str, Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = super().__call__(lowercase_ , num_workers=lowercase_ , **lowercase_)
if isinstance(lowercase_ , lowercase_) and len(lowercase_) == 1:
return outputs[0]
return outputs
def __UpperCAmelCase ( self : List[Any] , lowercase_ : Conversation , lowercase_ : Any=32) -> Dict[str, Any]:
"""simple docstring"""
if not isinstance(lowercase_ , lowercase_):
raise ValueError("ConversationalPipeline, expects Conversation as inputs")
if conversation.new_user_input is None:
raise ValueError(
                f'Conversation with UUID {conversation.uuid} does not contain new user input to process. '
"Add user inputs with the conversation's `add_user_input` method")
if hasattr(self.tokenizer , "_build_conversation_input_ids"):
_UpperCamelCase = self.tokenizer._build_conversation_input_ids(lowercase_)
else:
# If the tokenizer cannot handle conversations, we default to only the old version
_UpperCamelCase = self._legacy_parse_and_tokenize(lowercase_)
if self.framework == "pt":
_UpperCamelCase = torch.LongTensor([input_ids])
elif self.framework == "tf":
_UpperCamelCase = tf.constant([input_ids])
return {"input_ids": input_ids, "conversation": conversation}
def __UpperCAmelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int]=10 , **lowercase_ : Dict) -> List[str]:
"""simple docstring"""
_UpperCamelCase = generate_kwargs.get("max_length" , self.model.config.max_length)
_UpperCamelCase = model_inputs["input_ids"].shape[1]
if max_length - minimum_tokens < n:
            logger.warning(f'Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})')
_UpperCamelCase = max_length - minimum_tokens
_UpperCamelCase = model_inputs["input_ids"][:, -trim:]
if "attention_mask" in model_inputs:
_UpperCamelCase = model_inputs["attention_mask"][:, -trim:]
_UpperCamelCase = model_inputs.pop("conversation")
_UpperCamelCase = max_length
_UpperCamelCase = self.model.generate(**lowercase_ , **lowercase_)
if self.model.config.is_encoder_decoder:
_UpperCamelCase = 1
else:
_UpperCamelCase = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __UpperCAmelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int=True) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = model_outputs["output_ids"]
_UpperCamelCase = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
_UpperCamelCase = model_outputs["conversation"]
conversation.mark_processed()
conversation.append_response(lowercase_)
return conversation
def __UpperCAmelCase ( self : Any , lowercase_ : Conversation) -> Dict:
"""simple docstring"""
_UpperCamelCase = self.tokenizer.eos_token_id
_UpperCamelCase = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_) + [eos_token_id])
else:
input_ids.extend(self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_))
if len(lowercase_) > self.tokenizer.model_max_length:
_UpperCamelCase = input_ids[-self.tokenizer.model_max_length :]
return input_ids
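# A minimal usage sketch, hedged: upstream transformers exposes this pipeline
# as pipeline("conversational"), driven by a Conversation object (method names
# in this obfuscated copy differ, so the calls below assume the upstream API):
# >>> from transformers import pipeline, Conversation
# >>> chatbot = pipeline("conversational")
# >>> conversation = Conversation("Going to the movies tonight - any suggestions?")
# >>> conversation = chatbot(conversation)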
| 82
| 1
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def a ( A__ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : int = job['''started_at''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = job['''completed_at''']
SCREAMING_SNAKE_CASE__ : Dict = date_parser.parse(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = date_parser.parse(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = round((end_datetime - start_datetime).total_seconds() / 6_0.0 )
SCREAMING_SNAKE_CASE__ : Any = start
SCREAMING_SNAKE_CASE__ : Dict = end
SCREAMING_SNAKE_CASE__ : Optional[int] = duration_in_min
return job_info
def a ( A__ , A__=None ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if token is not None:
SCREAMING_SNAKE_CASE__ : int = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
SCREAMING_SNAKE_CASE__ : Dict = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
SCREAMING_SNAKE_CASE__ : List[Any] = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(A__ ) for job in result['''jobs''']} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE__ : int = requests.get(url + f"""&page={i + 2}""" , headers=A__ ).json()
job_time.update({job['''name''']: extract_time_from_single_job(A__ ) for job in result['''jobs''']} )
return job_time
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
a_ :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
a_ :Any = parser.parse_args()
a_ :str = get_job_time(args.workflow_run_id)
a_ :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
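# A worked example of the duration arithmetic in extract_time_from_single_job,
# with illustrative timestamps: started_at = "2023-01-01T00:00:00Z" and
# completed_at = "2023-01-01T00:30:00Z" parse to datetimes 1800 seconds apart,
# so duration_in_min == round(1800 / 60.0) == 30.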
| 35
|
from math import factorial
def SCREAMING_SNAKE_CASE__ ( lowercase = 20 ) -> int:
snake_case : Dict = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
snake_case : Dict = n // 2
return int(factorial(lowercase ) / (factorial(lowercase ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
lowerCamelCase : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
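# A worked example: solution(4) computes the central binomial coefficient
# C(8, 4) = 8! / (4! * 4!) = 40320 / 576 = 70.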
| 587
| 0
|
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
lowerCamelCase_ : Dict = logging.get_logger(__name__)
class a__ ( __snake_case ):
def __init__( self , *UpperCAmelCase , **UpperCAmelCase ) -> None:
warnings.warn(
'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use BeitImageProcessor instead.' , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 246
|
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ : Dict = logging.get_logger()
@dataclass
class a__ :
A__ : nn.Module
A__ : List[nn.Module] = field(default_factory=__snake_case )
A__ : list = field(default_factory=__snake_case )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str:
        __a = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase , nn.Conv2d ) or isinstance(UpperCAmelCase , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(UpperCAmelCase )
def __call__( self , UpperCAmelCase ) -> Optional[int]:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class a__ :
A__ : nn.Module
A__ : nn.Module
A__ : int = 0
A__ : List = field(default_factory=__snake_case )
A__ : List = field(default_factory=__snake_case )
def __call__( self , UpperCAmelCase ) -> List[str]:
__a = Tracker(self.dest )(UpperCAmelCase ).parametrized
__a = Tracker(self.src )(UpperCAmelCase ).parametrized
__a = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.src_skip , UpperCAmelCase ) )
__a = list(filter(lambda UpperCAmelCase : type(UpperCAmelCase ) not in self.dest_skip , UpperCAmelCase ) )
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise Exception(
f'''Numbers of operations are different. Source module has {len(UpperCAmelCase )} operations while'''
f''' destination module has {len(UpperCAmelCase )}.''' )
for dest_m, src_m in zip(UpperCAmelCase , UpperCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''' )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = True ):
print(f'''Converting {name}...''' )
with torch.no_grad():
__a = timm.create_model(__lowerCamelCase , pretrained=__lowerCamelCase ).eval()
__a = ResNetForImageClassification(__lowerCamelCase ).eval()
__a = ModuleTransfer(src=__lowerCamelCase , dest=__lowerCamelCase )
__a = torch.randn((1, 3, 224, 224) )
module_transfer(__lowerCamelCase )
assert torch.allclose(from_model(__lowerCamelCase ) , our_model(__lowerCamelCase ).logits ), "The model logits don't match the original one."
__a = f'''resnet{"-".join(name.split("resnet" ) )}'''
print(__lowerCamelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=__lowerCamelCase , )
# we can use the convnext one
__a = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=__lowerCamelCase , )
print(f'''Pushed {checkpoint_name}''' )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = True ):
__a = 'imagenet-1k-id2label.json'
__a = 1000
__a = (1, num_labels)
__a = 'huggingface/label-files'
__a = num_labels
__a = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type='dataset' ) , 'r' ) )
__a = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = partial(__lowerCamelCase , num_labels=__lowerCamelCase , idalabel=__lowerCamelCase , labelaid=__lowerCamelCase )
__a = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(__lowerCamelCase , names_to_config[model_name] , __lowerCamelCase , __lowerCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return config, expected_shape
if __name__ == "__main__":
lowerCamelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
lowerCamelCase_ : Any = parser.parse_args()
lowerCamelCase_ : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
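# A self-contained sketch of the forward-hook tracing idea behind the Tracker
# dataclass above; the tiny model here is hypothetical, purely for illustration:
# >>> import torch, torch.nn as nn
# >>> traced = []
# >>> model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
# >>> handles = [m.register_forward_hook(lambda mod, i, o: traced.append(mod))
# ...            for m in model.modules() if len(list(m.children())) == 0]
# >>> _ = model(torch.randn(1, 3, 8, 8))
# >>> for h in handles: h.remove()
# >>> len(traced)   # the three leaf modules, in execution order
# 3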
| 246
| 1
|
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
UpperCamelCase_ = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = None , snake_case_ = None ):
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Optional[int] = os.path.abspath(os.path.join("""examples""" , """by_feature""" ) )
_lowerCAmelCase : int = os.path.abspath("""examples""" )
for item in os.listdir(_a ):
if item not in EXCLUDE_EXAMPLES:
_lowerCAmelCase : Any = os.path.join(_a , _a )
if os.path.isfile(_a ) and ".py" in item_path:
with self.subTest(
tested_script=_a , feature_script=_a , tested_section="""main()""" if parser_only else """training_function()""" , ):
_lowerCAmelCase : str = compare_against_test(
os.path.join(_a , _a ) , _a , _a , _a )
_lowerCAmelCase : Tuple = """\n""".join(_a )
if special_strings is not None:
for string in special_strings:
_lowerCAmelCase : int = diff.replace(_a , """""" )
self.assertEqual(_a , """""" )
def __UpperCamelCase ( self ):
self.one_complete_example("""complete_nlp_example.py""" , _a )
self.one_complete_example("""complete_nlp_example.py""" , _a )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = os.path.abspath(os.path.join("""examples""" , """cv_example.py""" ) )
_lowerCAmelCase : List[str] = [
""" """ * 1_6 + """{\n\n""",
""" """ * 2_0 + """\"accuracy\": eval_metric[\"accuracy\"],\n\n""",
""" """ * 2_0 + """\"f1\": eval_metric[\"f1\"],\n\n""",
""" """ * 2_0 + """\"train_loss\": total_loss.item() / len(train_dataloader),\n\n""",
""" """ * 2_0 + """\"epoch\": epoch,\n\n""",
""" """ * 1_6 + """},\n\n""",
""" """ * 1_6 + """step=epoch,\n""",
""" """ * 1_2,
""" """ * 8 + """for step, batch in enumerate(active_dataloader):\n""",
]
self.one_complete_example("""complete_cv_example.py""" , _a , _a , _a )
self.one_complete_example("""complete_cv_example.py""" , _a , _a , _a )
@mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """1"""} )
class a_ (UpperCAmelCase__ ):
__lowerCAmelCase : List[str] = False
@classmethod
def __UpperCamelCase ( cls ):
super().setUpClass()
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : List[str] = os.path.join(cls._tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
_lowerCAmelCase : List[str] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __UpperCamelCase ( cls ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """epoch_0""" ) ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
_lowerCAmelCase : List[str] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , """step_2""" ) ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}\n '.split()
_lowerCAmelCase : List[Any] = run_command(self._launch_args + testargs , return_stdout=_a )
self.assertNotIn("""epoch 0:""" , _a )
self.assertIn("""epoch 1:""" , _a )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}\n '.split()
_lowerCAmelCase : Any = run_command(self._launch_args + testargs , return_stdout=_a )
if torch.cuda.is_available():
_lowerCAmelCase : int = torch.cuda.device_count()
else:
_lowerCAmelCase : int = 1
if num_processes > 1:
self.assertNotIn("""epoch 0:""" , _a )
self.assertIn("""epoch 1:""" , _a )
else:
self.assertIn("""epoch 0:""" , _a )
self.assertIn("""epoch 1:""" , _a )
@slow
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = """
examples/by_feature/cross_validation.py
--num_folds 2
""".split()
with mock.patch.dict(os.environ , {"""TESTING_MOCKED_DATALOADERS""": """0"""} ):
_lowerCAmelCase : Optional[int] = run_command(self._launch_args + testargs , return_stdout=_a )
_lowerCAmelCase : int = re.findall("""({.+})""" , _a )
_lowerCAmelCase : Optional[int] = [r for r in results if """accuracy""" in r][-1]
_lowerCAmelCase : Optional[Any] = ast.literal_eval(_a )
self.assertGreaterEqual(results["""accuracy"""] , 0.75 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = ["""examples/by_feature/multi_process_metrics.py"""]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __UpperCamelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
_lowerCAmelCase : Optional[int] = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_a , """tracking""" ) ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = ["""examples/by_feature/gradient_accumulation.py"""]
run_command(self._launch_args + testargs )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = ["""examples/by_feature/local_sgd.py"""]
run_command(self._launch_args + testargs )
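# A hedged note on the launch pattern used throughout these tests: each test
# shells out with cls._launch_args plus the example script and its flags, i.e.
# something equivalent to:
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch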
| 384
|
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
lowerCAmelCase : int = """▁"""
lowerCAmelCase : str = {"""vocab_file""": """prophetnet.tokenizer"""}
lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"""
),
}
}
lowerCAmelCase : Any = {
"""microsoft/xprophetnet-large-wiki100-cased""": {"""do_lower_case""": False},
}
lowerCAmelCase : List[Any] = {
"""microsoft/xprophetnet-large-wiki100-cased""": 512,
}
def a__ ( snake_case__ ) -> int:
lowerCamelCase = collections.OrderedDict()
with open(snake_case__ , """r""" , encoding="""utf-8""" ) as reader:
lowerCamelCase = reader.readlines()
for index, token in enumerate(snake_case__ ):
lowerCamelCase = token.rstrip("""\n""" )
lowerCamelCase = index
return vocab
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["input_ids", "attention_mask"]
def __init__( self , _a , _a="[SEP]" , _a="[SEP]" , _a="[SEP]" , _a="[UNK]" , _a="[PAD]" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
"""simple docstring"""
lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a , eos_token=_a , sep_token=_a , unk_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
lowerCamelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
lowerCamelCase = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
lowerCamelCase = f'[unused{i}]'
lowerCamelCase = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
lowerCamelCase = 12
lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(_a )
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase = self.__dict__.copy()
lowerCamelCase = None
return state
def __setstate__( self , _a ):
"""simple docstring"""
lowerCamelCase = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCamelCase = {}
lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , _a , _a = None , _a = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is None:
return ([0] * len(_a )) + [1]
return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
lowerCamelCase = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _lowerCAmelCase ( self ):
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
return self.sp_model.encode(_a , out_type=_a )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase = self.sp_model.PieceToId(_a )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = """""".join(_a ).replace(_a , """ """ ).strip()
return out_string
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCamelCase = os.path.join(
_a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , """wb""" ) as fi:
lowerCamelCase = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
def _lowerCAmelCase ( self , _a , _a = None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
lowerCamelCase = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
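# A worked example of the fairseq-offset bookkeeping above: the sentencepiece
# piece "," has spm id 3, and fairseq_offset is 12, so the token-to-id
# conversion maps "," to 3 + 12 = 15 -- matching the alignment table and the
# comment in __init__.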
| 543
| 0
|
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
__snake_case : Dict = Mapping[str, np.ndarray]
__snake_case : Optional[Any] = Mapping[str, Any] # Is a nested dict.
__snake_case : Optional[Any] = 0.01
@dataclasses.dataclass(frozen=lowercase_ )
class lowerCamelCase :
'''simple docstring'''
__snake_case = 42 # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
__snake_case = 42 # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
__snake_case = 42 # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
__snake_case = 42 # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
__snake_case = 42 # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
__snake_case = None
# Optional remark about the protein. Included as a comment in output PDB
# files
__snake_case = None
# Templates used to generate this protein (prediction-only)
__snake_case = None
# Chain corresponding to each parent
__snake_case = None
def __lowerCamelCase ( __snake_case : str ) -> Protein:
"""simple docstring"""
A__ : str =r"""(\[[A-Z]+\]\n)"""
A__ : List[str] =[tag.strip() for tag in re.split(__snake_case, __snake_case ) if len(__snake_case ) > 0]
A__ : Iterator[Tuple[str, List[str]]] =zip(tags[0::2], [l.split("""\n""" ) for l in tags[1::2]] )
A__ : List[str] =["N", "CA", "C"]
A__ : Optional[int] =None
A__ : int =None
A__ : Dict =None
for g in groups:
if "[PRIMARY]" == g[0]:
A__ : Optional[int] =g[1][0].strip()
for i in range(len(__snake_case ) ):
if seq[i] not in residue_constants.restypes:
A__ : List[str] ="""X""" # FIXME: strings are immutable
A__ : Optional[Any] =np.array(
[residue_constants.restype_order.get(__snake_case, residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
A__ : List[List[float]] =[]
for axis in range(3 ):
tertiary.append(list(map(__snake_case, g[1][axis].split() ) ) )
A__ : Optional[int] =np.array(__snake_case )
            A__ : Dict =np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
for i, atom in enumerate(__snake_case ):
A__ : Optional[int] =np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
A__ : Tuple =np.array(list(map({"""-""": 0, """+""": 1}.get, g[1][0].strip() ) ) )
A__ : Dict =np.zeros(
(
len(__snake_case ),
residue_constants.atom_type_num,
            ) ).astype(np.float32 )
for i, atom in enumerate(__snake_case ):
A__ : Optional[int] =1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__snake_case, atom_mask=__snake_case, aatype=__snake_case, residue_index=np.arange(len(__snake_case ) ), b_factors=__snake_case, )
def __lowerCamelCase ( __snake_case : Protein, __snake_case : int = 0 ) -> List[str]:
"""simple docstring"""
A__ : List[str] =[]
A__ : str =prot.remark
if remark is not None:
pdb_headers.append(f"REMARK {remark}" )
A__ : Optional[Any] =prot.parents
A__ : Any =prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
A__ : Tuple =[p for i, p in zip(__snake_case, __snake_case ) if i == chain_id]
if parents is None or len(__snake_case ) == 0:
A__ : Optional[Any] =["""N/A"""]
pdb_headers.append(f"PARENT {' '.join(__snake_case )}" )
return pdb_headers
def __lowerCamelCase ( __snake_case : Protein, __snake_case : str ) -> str:
"""simple docstring"""
A__ : List[str] =[]
A__ : Union[str, Any] =pdb_str.split("""\n""" )
A__ : Dict =prot.remark
if remark is not None:
out_pdb_lines.append(f"REMARK {remark}" )
A__ : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
A__ : str =[]
if prot.parents_chain_index is not None:
A__ : Dict[str, List[str]] ={}
for p, i in zip(prot.parents, prot.parents_chain_index ):
parent_dict.setdefault(str(__snake_case ), [] )
parent_dict[str(__snake_case )].append(__snake_case )
A__ : Any =max([int(__snake_case ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
A__ : List[str] =parent_dict.get(str(__snake_case ), ["""N/A"""] )
parents_per_chain.append(__snake_case )
else:
parents_per_chain.append(list(prot.parents ) )
else:
A__ : Dict =[["""N/A"""]]
def make_parent_line(__snake_case : Sequence[str] ) -> str:
return f"PARENT {' '.join(__snake_case )}"
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
A__ : Any =0
for i, l in enumerate(__snake_case ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__snake_case )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__snake_case ):
A__ : Dict =parents_per_chain[chain_counter]
else:
A__ : List[str] =["""N/A"""]
out_pdb_lines.append(make_parent_line(__snake_case ) )
return "\n".join(__snake_case )
def __lowerCamelCase ( __snake_case : Protein ) -> str:
"""simple docstring"""
A__ : Any =residue_constants.restypes + ["""X"""]
def res_atoa(__snake_case : int ) -> str:
        return residue_constants.restype_1to3.get(restypes[r], """UNK""" )
A__ : Any =residue_constants.atom_types
A__ : List[str] =[]
A__ : Union[str, Any] =prot.atom_mask
A__ : List[str] =prot.aatype
A__ : Union[str, Any] =prot.atom_positions
    A__ : List[Any] =prot.residue_index.astype(np.int32 )
A__ : Union[str, Any] =prot.b_factors
A__ : int =prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError("""Invalid aatypes.""" )
A__ : Dict =get_pdb_headers(__snake_case )
if len(__snake_case ) > 0:
pdb_lines.extend(__snake_case )
A__ : Tuple =aatype.shape[0]
A__ : Optional[Any] =1
A__ : List[str] =0
A__ : Union[str, Any] =string.ascii_uppercase
A__ : List[str] =None
# Add all atom sites.
for i in range(__snake_case ):
A__ : Tuple =res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__snake_case, atom_positions[i], atom_mask[i], b_factors[i] ):
if mask < 0.5:
continue
A__ : List[Any] ="""ATOM"""
A__ : Union[str, Any] =atom_name if len(__snake_case ) == 4 else f" {atom_name}"
A__ : List[str] =""""""
A__ : Any =""""""
A__ : Optional[Any] =1.00
A__ : str =atom_name[0] # Protein supports only C, N, O, S, this works.
A__ : Tuple =""""""
A__ : List[str] ="""A"""
if chain_index is not None:
A__ : int =chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
A__ : List[str] =(
f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
f"{res_name_a:>3} {chain_tag:>1}"
f"{residue_index[i]:>4}{insertion_code:>1} "
f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
f"{occupancy:>6.2f}{b_factor:>6.2f} "
f"{element:>2}{charge:>2}"
)
pdb_lines.append(__snake_case )
atom_index += 1
A__ : List[str] =i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
A__ : Optional[Any] =True
A__ : int =chain_index[i + 1]
if should_terminate:
# Close the chain.
A__ : int ="""TER"""
A__ : Optional[int] =(
f"{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"
)
pdb_lines.append(__snake_case )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__snake_case, __snake_case ) )
pdb_lines.append("""END""" )
pdb_lines.append("""""" )
return "\n".join(__snake_case )
def __lowerCamelCase ( __snake_case : Protein ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __lowerCamelCase ( __snake_case : FeatureDict, __snake_case : ModelOutput, __snake_case : Optional[np.ndarray] = None, __snake_case : Optional[np.ndarray] = None, __snake_case : Optional[str] = None, __snake_case : Optional[Sequence[str]] = None, __snake_case : Optional[Sequence[int]] = None, ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features["""aatype"""], atom_positions=result["""final_atom_positions"""], atom_mask=result["""final_atom_mask"""], residue_index=features["""residue_index"""] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["""final_atom_mask"""] ), chain_index=__snake_case, remark=__snake_case, parents=__snake_case, parents_chain_index=__snake_case, )
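# A note on the columnar ATOM record built in to_pdb above: PDB is a
# fixed-width format, so f"{'ATOM':<6}{atom_index:>5}" pins the record type to
# columns 1-6 and the serial number, right-aligned, to columns 7-11; atom
# names shorter than four characters get a leading space so the element
# symbol lands in its conventional column.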
| 707
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
class lowerCamelCase ( lowercase_ , lowercase_ ):
'''simple docstring'''
@register_to_config
def __init__( self : List[str] , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : Tuple[str] = ("DownEncoderBlock2D",) , lowerCAmelCase_ : Tuple[str] = ("UpDecoderBlock2D",) , lowerCAmelCase_ : Tuple[int] = (64,) , lowerCAmelCase_ : int = 1 , lowerCAmelCase_ : str = "silu" , lowerCAmelCase_ : int = 3 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : int = 2_56 , lowerCAmelCase_ : int = 32 , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : float = 0.18215 , lowerCAmelCase_ : str = "group" , ) -> List[str]:
'''simple docstring'''
super().__init__()
# pass init params to Encoder
A__ : Optional[Any] =Encoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , down_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , double_z=lowerCAmelCase_ , )
A__ : Dict =vq_embed_dim if vq_embed_dim is not None else latent_channels
        A__ : Union[str, Any] =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
A__ : Optional[int] =VectorQuantizer(lowerCAmelCase_ , lowerCAmelCase_ , beta=0.25 , remap=lowerCAmelCase_ , sane_index_shape=lowerCAmelCase_ )
        A__ : Tuple =nn.Conv2d(lowerCAmelCase_ , lowerCAmelCase_ , 1 )
# pass init params to Decoder
A__ : Optional[Any] =Decoder(
in_channels=lowerCAmelCase_ , out_channels=lowerCAmelCase_ , up_block_types=lowerCAmelCase_ , block_out_channels=lowerCAmelCase_ , layers_per_block=lowerCAmelCase_ , act_fn=lowerCAmelCase_ , norm_num_groups=lowerCAmelCase_ , norm_type=lowerCAmelCase_ , )
@apply_forward_hook
def lowercase__ ( self : List[str] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> VQEncoderOutput:
'''simple docstring'''
A__ : Dict =self.encoder(lowerCAmelCase_ )
A__ : Union[str, Any] =self.quant_conv(lowerCAmelCase_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=lowerCAmelCase_ )
@apply_forward_hook
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
# also go through quantization layer
if not force_not_quantize:
A__ , A__ , A__ : Tuple =self.quantize(lowerCAmelCase_ )
else:
A__ : List[str] =h
A__ : Dict =self.post_quant_conv(lowerCAmelCase_ )
A__ : List[Any] =self.decoder(lowerCAmelCase_ , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
def lowercase__ ( self : str , lowerCAmelCase_ : torch.FloatTensor , lowerCAmelCase_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
'''simple docstring'''
A__ : Optional[int] =sample
A__ : Union[str, Any] =self.encode(lowerCAmelCase_ ).latents
A__ : Tuple =self.decode(lowerCAmelCase_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=lowerCAmelCase_ )
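# A minimal round-trip sketch, hedged: upstream diffusers exposes this model
# as VQModel, and a default-constructed instance maps an image-shaped tensor
# through encode -> quantize -> decode back to the same shape:
# >>> import torch
# >>> from diffusers import VQModel
# >>> model = VQModel()
# >>> out = model(torch.randn(1, 3, 32, 32)).sample  # shape (1, 3, 32, 32)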
| 687
| 0
|
"""simple docstring"""
import datasets
from .evaluate import evaluate
_A = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_A = """
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_A = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def A ( self : List[Any] )-> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
def A ( self : str , A_ : str , A_ : List[Any] )-> Optional[int]:
__UpperCamelCase = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
__UpperCamelCase = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
__UpperCamelCase = evaluate(dataset=lowerCAmelCase__ , predictions=lowerCAmelCase__ )
return score
| 505
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
UpperCAmelCase : Tuple = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _A ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
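# A quick illustration of the constraints above: alpha_ce=0.5, alpha_clm=0.5
# with alpha_mlm=alpha_mse=alpha_cos=0.0 satisfies them for a CLM run (no
# --mlm flag), since the loss weights sum to a positive value and the
# mlm/clm exclusivity assert holds.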
def _A ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
if args.student_type == "roberta":
a__ : List[str] =False
elif args.student_type == "gpt2":
a__ : Any =False
def _A ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
if args.student_type == "roberta":
a__ : List[str] =False
def main():
    """Parse the command line arguments, run sanity checks and launch the distillation."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only on the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
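    # Illustrative aside (hypothetical counts): with mlm_smoothing=0.7, a token seen 10_000 times
    # gets weight 10_000 ** -0.7 (about 1.6e-3) while a token seen 10 times gets 10 ** -0.7
    # (about 0.2), so rarer tokens are selected for masking much more often.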
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")
    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
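# Hypothetical launch command (all paths and file names below are placeholders):
#
#   python train.py \
#     --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --dump_path serialization_dir/my_distillation \
#     --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle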
| 563
| 0
|
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files stored in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
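# Hypothetical usage sketch (the repository id and file name are made up for illustration):
# from huggingface_hub import HfApi
#
# repo_info = HfApi().dataset_info("user/my-dataset")   # a DatasetInfo with `.siblings` and `.sha`
# fs = HfFileSystem(repo_info=repo_info)
# fs.ls("")                                             # list files at the repository root
# with fs.open("data/train.csv") as f:
#     header = f.readline()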
| 721
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
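# Illustrative launch commands (the script file name is a placeholder):
#
#   accelerate launch tracking_example.py                   # train without logging
#   accelerate launch tracking_example.py --with_tracking   # log to all detected trackers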
| 451
| 0
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
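# Illustrative check of the synthetic dataset above (exact values depend on the RNG seed):
# ds = RegressionDataset(a=2, b=3, length=4, seed=0)
# len(ds)  # -> 4
# ds[0]    # -> {"x": <float>, "y": approximately 2 * x + 3 plus N(0, 0.1) noise}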
| 44
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    """Print the first-order entropy, the second-order entropy and their difference for `text`."""
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each two-character sequence calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single-character and two-character strings in `text`."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
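# Illustrative sanity check: for text where 'a' and 'b' each occur with probability 0.5,
# H = -(0.5 * log2(0.5) + 0.5 * log2(0.5)) = 1.0 bit, so calculate_prob("abababab")
# prints approximately 1.0 for the single-character entropy.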
| 600
| 0
|
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """The output of `PriorTransformer`: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
@register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
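    # Illustrative note: `post_process_latents` undoes the CLIP-statistics normalization applied
    # during training, i.e. unscaled = latents * clip_std + clip_mean, so sampled latents can be
    # compared directly with real CLIP image embeddings.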
| 561
|
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    """Convert a number of bytes to (integer) megabytes."""
    return int(x / 2**20)
class TorchTracemalloc:
    """Context manager that records CUDA memory allocated and peaked during its block."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument("--n_train", type=int, default=320, help="Number of training examples to use.")
    parser.add_argument("--n_val", type=int, default=160, help="Number of validation examples to use.")
    parser.add_argument("--num_epochs", type=int, default=1, help="Number of train epochs.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
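# Illustrative launch (the DeepSpeed config file and script names are placeholders):
#
#   accelerate launch --config_file deepspeed_config.yaml peak_memory_example.py \
#     --model_name_or_path bert-base-cased --num_epochs 1 --peak_memory_upper_bound 4000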
| 561
| 1
|
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string prefixed with 0b."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
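# Worked examples: 25 = 0b11001 and 32 = 0b100000 share no set bits, so
# binary_and(25, 32) == "0b000000"; 37 = 0b100101 and 50 = 0b110010 share only
# bit 5, so binary_and(37, 50) == "0b100000".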
| 592
|
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph: each possible edge is added independently with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is lower than the given probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i too
                    graph[j].append(i)
    return graph


def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) undirected graph."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
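# Illustrative usage (the exact edges depend on the seeded RNG):
# random.seed(1)
# random_graph(4, 0.5)   # undirected: every edge i-j also appears as j-i
# random_graph(4, 1)     # probability >= 1 returns the complete graph on 4 vertices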
| 245
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
    "BridgeTower/bridgetower-base-itm-mlm": (
        "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
    ),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration of the vision encoder of a BridgeTower model."""

    model_type = "bridgetower_vision_model"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_channels=3,
        patch_size=16,
        image_size=288,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        stop_gradient=False,
        share_layernorm=True,
        remove_last_layer=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.stop_gradient = stop_gradient
        self.share_layernorm = share_layernorm
        self.remove_last_layer = remove_last_layer

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration of the text encoder of a BridgeTower model."""

    model_type = "bridgetower_text_model"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        initializer_factor=1,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_factor = initializer_factor
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if config_dict.get("model_type") == "bridgetower":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class BridgeTowerConfig(PretrainedConfig):
    """Configuration of a BridgeTower model, combining a text and a vision sub-configuration."""

    model_type = "bridgetower"

    def __init__(
        self,
        share_cross_modal_transformer_layers=True,
        hidden_act="gelu",
        hidden_size=768,
        initializer_factor=1,
        layer_norm_eps=1e-05,
        share_link_tower_layers=False,
        link_tower_type="add",
        num_attention_heads=12,
        num_hidden_layers=6,
        tie_word_embeddings=False,
        init_layernorm_from_vision_encoder=False,
        text_config=None,
        vision_config=None,
        **kwargs,
    ):
        # TODO: remove this once the Hub files are updated.
        _ = kwargs.pop("text_config_dict", None)
        _ = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)
        self.share_cross_modal_transformer_layers = share_cross_modal_transformer_layers
        self.hidden_act = hidden_act
        self.hidden_size = hidden_size
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.share_link_tower_layers = share_link_tower_layers
        self.link_tower_type = link_tower_type
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.tie_word_embeddings = tie_word_embeddings
        self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")

        self.text_config = BridgeTowerTextConfig(**text_config)
        self.vision_config = BridgeTowerVisionConfig(**vision_config)

    @classmethod
    def from_text_vision_configs(
        cls, text_config: BridgeTowerTextConfig, vision_config: BridgeTowerVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
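# Hypothetical usage sketch of the three configs above:
# text_cfg = BridgeTowerTextConfig()        # defaults: vocab_size=50265, hidden_size=768
# vision_cfg = BridgeTowerVisionConfig()    # defaults: patch_size=16, image_size=288
# cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
# assert cfg.to_dict()["text_config"]["vocab_size"] == 50265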
| 715
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD"
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
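# Illustrative launch (flags shown are the ones defined above; the script name is a placeholder):
#
#   accelerate launch --multi_gpu local_sgd_example.py --local_sgd_steps 8 --gradient_accumulation_steps 1
#
# With local SGD, each worker takes `local_sgd_steps` optimizer steps on its own data shard before
# parameters are averaged across workers, trading synchronization frequency for throughput.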
| 351
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="""utf-8""" , check=True , )
        assert hasattr(self , """env""")
    def create_estimator(self , instance_count):
        job_name = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'
        # distributed data settings
        distribution = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
    def save_results_as_csv(self , job_name):
        TrainingJobAnalytics(job_name).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv')
    @parameterized.expand([(2,)])
    def test_script(self , instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 99_9999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy)
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(F'{estimator.latest_training_job.name}.json' , """w""") as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile)
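# A tiny standalone sketch of the KPI extraction above (the dataframe is a toy
# one built by hand; TrainingJobAnalytics.dataframe() yields this
# metric_name/value shape):
import pandas as pd

toy = pd.DataFrame(
    {"metric_name": ["eval_accuracy", "eval_loss", "eval_accuracy"],
     "value": [0.71, 0.58, 0.73]}
)
eval_accuracy = list(toy[toy.metric_name == "eval_accuracy"]["value"])
assert eval_accuracy == [0.71, 0.73]
assert all(t >= 0.7 for t in eval_accuracy)  # mirrors the test's threshold check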
| 88
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self , *args , **kwargs) -> None:
        warnings.warn(
            """The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use ImageGPTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs)
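# The class above is the standard rename shim. A generic, runnable sketch of
# the same pattern (names `OldThing`/`NewThing` invented for illustration):
import warnings as _warnings

class NewThing:
    def __init__(self, value=0):
        self.value = value

class OldThing(NewThing):
    def __init__(self, *args, **kwargs):
        _warnings.warn(
            "OldThing is deprecated; use NewThing instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

# Old call sites keep working, but emit a FutureWarning pointing at NewThing.
legacy = OldThing(value=3)
assert isinstance(legacy, NewThing) and legacy.value == 3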
| 88
| 1
|
class CircularQueue:
    """Circular FIFO queue with a fixed capacity, backed by a flat list."""

    def __init__(self , n) -> None:
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self , data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
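# A minimal usage sketch for the circular queue above (CircularQueue is this
# file's own class, not a library import):
if __name__ == "__main__":
    q = CircularQueue(3)
    q.enqueue("a").enqueue("b")  # enqueue returns self, so calls can chain
    assert len(q) == 2 and q.first() == "a"
    assert q.dequeue() == "a"    # front advances modulo capacity
    q.enqueue("c").enqueue("d")  # rear wraps around the underlying list
    assert q.dequeue() == "b" and q.dequeue() == "c" and q.dequeue() == "d"
    assert q.is_empty()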
| 380
|
def topological_sort(graph) -> None:
    """Kahn's algorithm: repeatedly emit vertices whose in-degree has dropped to zero."""
    indegree = [0] * len(graph )
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        cnt += 1
        topo.append(vertex )
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x )
    if cnt != len(graph ):
        print('Cycle exists' )
    else:
        print(topo )
# Adjacency List of Graph
lowercase_ = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
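# A quick check of the cycle-detection branch (a hypothetical 3-cycle, not part
# of the original example): every vertex keeps a positive in-degree, so the
# queue of zero-in-degree vertices empties before cnt reaches len(graph).
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"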
| 380
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig ):
    """Configuration class to store the configuration of a Transformer-XL model."""
    model_type = """transfo-xl"""
    keys_to_ignore_at_inference = ["""mems"""]
    attribute_map = {
        """n_token""": """vocab_size""",
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=2_6_7_7_3_5 , cutoffs=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] , d_model=1_0_2_4 , d_embed=1_0_2_4 , n_head=1_6 , d_head=6_4 , d_inner=4_0_9_6 , div_val=4 , pre_lnorm=False , n_layer=1_8 , mem_len=1_6_0_0 , clamp_len=1_0_0_0 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self , value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 587
|
'''simple docstring'''
from __future__ import annotations
def prime_sieve(limit ):
    """Sieve of Eratosthenes over the numbers below ``limit``; returns the primes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes


def solution(ceiling = 1_00_00_00 ):
    """Project Euler 50: longest run of consecutive primes summing to a prime below ``ceiling``."""
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(F"{solution() = }")
| 331
| 0
|
__version__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
        StableUnCLIPPipeline,
        TextToVideoSDPipeline,
        TextToVideoZeroPipeline,
        UnCLIPImageVariationPipeline,
        UnCLIPPipeline,
        UniDiffuserModel,
        UniDiffuserPipeline,
        UniDiffuserTextDecoder,
        VersatileDiffusionDualGuidedPipeline,
        VersatileDiffusionImageVariationPipeline,
        VersatileDiffusionPipeline,
        VersatileDiffusionTextToImagePipeline,
        VideoToVideoSDPipeline,
        VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
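# The try/except blocks above all follow one optional-dependency guard. A
# standalone, runnable sketch of the same pattern (module and class names here
# are invented for illustration, not diffusers API):
import importlib.util

def is_backend_available(name: str) -> bool:
    # True when `name` can be imported in the current environment.
    return importlib.util.find_spec(name) is not None

class _MissingBackend:
    # Stand-in that defers the failure from import time to use time.
    def __init__(self, backend: str):
        self._backend = backend
    def __call__(self, *args, **kwargs):
        raise ImportError(f"This object requires `{self._backend}` to be installed.")

if is_backend_available("torch"):
    import torch  # real import only when the backend exists
    zeros = torch.zeros
else:
    zeros = _MissingBackend("torch")  # importable placeholder, raises on use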
| 717
|
def solution(n = 600_851_475_143 ) -> int:
    """Project Euler 3: return the largest prime factor of ``n``."""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
if __name__ == "__main__":
print(f"{solution() = }")
| 154
| 0
|
def solution(limit : int = 1_0_0_0_0_0_0 ):
    """Project Euler 14: starting number below ``limit`` with the longest Collatz chain."""
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2 , limit ):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
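# A small check of the memoization (my example, not part of the original):
# among starting numbers in [2, 10), 9 yields the longest Collatz chain
# (9 -> 28 -> 14 -> ... -> 1, 20 terms), and every finished chain length is
# cached in `counters` so later starts reuse it instead of walking again.
assert solution(10) == 9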
| 335
|
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def unet(hor ):
    if hor == 1_2_8:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (3_2, 1_2_8, 2_5_6)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D')
    elif hor == 3_2:
        down_block_types = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
        block_out_channels = (3_2, 6_4, 1_2_8, 2_5_6)
        up_block_types = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
    model = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
    state_dict = model.state_dict()
    config = {
        'down_block_types': down_block_types,
        'block_out_channels': block_out_channels,
        'up_block_types': up_block_types,
        'layers_per_block': 1,
        'use_timestep_embedding': True,
        'out_block_type': 'OutConv1DBlock',
        'norm_num_groups': 8,
        'downsample_each_block': False,
        'in_channels': 1_4,
        'out_channels': 1_4,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'sample_size': 6_5_5_3_6,
        'mid_block_type': 'MidResTemporalBlock1D',
        'act_fn': 'mish',
    }
    hf_value_function = UNet1DModel(**config )
    print(F'length of state dict: {len(state_dict.keys() )}' )
    print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
    with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , 'w' ) as f:
        json.dump(config , f )
def value_function():
    config = {
        'in_channels': 1_4,
        'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
        'up_block_types': (),
        'out_block_type': 'ValueFunction',
        'mid_block_type': 'ValueFunctionMidBlock1D',
        'block_out_channels': (3_2, 6_4, 1_2_8, 2_5_6),
        'layers_per_block': 1,
        'downsample_each_block': True,
        'sample_size': 6_5_5_3_6,
        'out_channels': 1_4,
        'extra_in_channels': 0,
        'time_embedding_type': 'positional',
        'use_timestep_embedding': True,
        'flip_sin_to_cos': False,
        'freq_shift': 1,
        'norm_num_groups': 8,
        'act_fn': 'mish',
    }
    model = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
    state_dict = model
    hf_value_function = UNet1DModel(**config )
    print(F'length of state dict: {len(state_dict.keys() )}' )
    print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
    with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
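# A note on the conversion trick above: the rename map is built by zipping the
# two state dicts' key sequences, which silently assumes both dicts enumerate
# parameters in matching order. A tiny illustration with made-up keys:
old_sd = {"blocks.0.w": 1, "blocks.0.b": 2}
new_keys = ["down.0.weight", "down.0.bias"]
renamed = {new: old_sd[old] for old, new in zip(old_sd.keys(), new_keys)}
assert renamed == {"down.0.weight": 1, "down.0.bias": 2}
# If either ordering changed (e.g. a refactor reorders modules), weights would
# land in the wrong tensors without an error; the shape checks inside
# load_state_dict are the only safety net.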
| 335
| 1
|
def optimal_merge_pattern(files ) -> float:
    """Greedily merge the two smallest files until one remains; return the total cost."""
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
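# An equivalent O(n log n) sketch using a binary heap (my alternative, not part
# of the original; the greedy "merge two smallest" rule is the same one Huffman
# coding uses):
import heapq

def optimal_merge_pattern_heap(files: list) -> float:
    heapq.heapify(files)                 # min-heap over file sizes, O(n)
    total = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        total += merged                  # cost of this merge
        heapq.heappush(files, merged)    # merged file competes in later rounds
    return total

assert optimal_merge_pattern_heap([2, 3, 4]) == 14  # (2+3)=5, then (5+4)=9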
| 387
|
from __future__ import annotations
def simple_interest(principal , daily_interest_rate , days_between_payments ) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0" )
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * daily_interest_rate * days_between_payments
def compound_interest(principal , nominal_annual_interest_rate_percentage , number_of_compounding_periods , ) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0" )
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )
def apr_interest(principal , nominal_annual_percentage_rate , number_of_years , ) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0" )
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0" )
    if principal <= 0:
        raise ValueError("principal must be > 0" )
    return compound_interest(
        principal , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod()
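# Quick usage sketch (the numbers are my own; rates are per-period fractions,
# so 0.01 == 1%):
assert round(simple_interest(500, 0.01, 3), 2) == 15.0        # 500 * 0.01 * 3
assert round(compound_interest(100, 0.10, 2), 2) == 21.0      # 100 * (1.1**2 - 1)
# apr_interest compounds the annual rate daily over the whole term:
assert apr_interest(100, 0.10, 1) == compound_interest(100, 0.10 / 365, 365)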
| 387
| 1
|
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params , i , prefix ):
    '''Returns the Relative Position Bias parameters of a layer. Does not transpose.'''
    return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :]
def t5x_attention_lookup(params , i , prefix , layer_name="attention" ):
    '''Returns the KOQV parameters of (self-)attention. Does not transpose.'''
    k_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
    return k, o, q, v
def t5x_mlp_lookup(params , i , prefix , split_mlp_wi=False ):
    '''Returns the MLP parameters of a layer. Does not transpose.'''
    if split_mlp_wi:
        wi_0 = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :]
        wi_1 = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :]
    wo = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params , i , prefix , layer_name ):
    '''Returns the layer norm parameters of a layer.'''
    return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i]
def convert_t5x_to_pytorch(variables: dict , * , num_layers: int , is_encoder_only: bool , scalable_attention: bool = False ):
    '''Converts the parameters from a T5X-Flax checkpoint to a PyTorch-compatible state dict.'''
    old = traverse_util.flatten_dict(variables["""target"""] )
    old = {"""/""".join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""" , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new["""shared.weight"""] = old["""token_embedder/embedding"""]
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old , i , """encoder""" , """pre_attention_layer_norm""" )
        k, o, q, v = t5x_attention_lookup(old , i , """encoder""" , """attention""" )
        new[F'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[F'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[F'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old , i , """encoder""" , """pre_mlp_layer_norm""" )
        wi, wo = t5x_mlp_lookup(old , i , """encoder""" , split_mlp_wi )
        new[F'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[F'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[F'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F'encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = t5x_relpos_bias_lookup(
                old , i , """encoder""" ).T
    new["""encoder.final_layer_norm.weight"""] = old["""encoder/encoder_norm/scale"""]
    if not scalable_attention:
        new["""encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = t5x_relpos_bias_lookup(
            old , 0 , """encoder""" ).T
        new["""decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"""] = t5x_relpos_bias_lookup(
            old , 0 , """decoder""" ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , """decoder""" , """pre_self_attention_layer_norm""" )
            k, o, q, v = t5x_attention_lookup(old , i , """decoder""" , """self_attention""" )
            new[F'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , """decoder""" , """pre_cross_attention_layer_norm""" )
            k, o, q, v = t5x_attention_lookup(old , i , """decoder""" , """encoder_decoder_attention""" )
            new[F'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[F'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[F'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old , i , """decoder""" , """pre_mlp_layer_norm""" )
            wi, wo = t5x_mlp_lookup(old , i , """decoder""" , split_mlp_wi )
            new[F'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[F'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[F'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F'decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight'] = t5x_relpos_bias_lookup(old , i , """decoder""" ).T
        new["""decoder.final_layer_norm.weight"""] = old["""decoder/decoder_norm/scale"""]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["""lm_head.weight"""] = old["""decoder/logits_dense/kernel"""].T
    return new
def make_state_dict(converted_params , is_encoder_only: bool ):
    '''Prepares a state dict for the PyTorch model.'''
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["""encoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["""decoder.embed_tokens.weight"""] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""" )
            state_dict["""lm_head.weight"""] = state_dict["""shared.weight"""]
    return state_dict
def load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention ):
    '''Replaces the model's parameters with those from the T5X checkpoint.'''
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    converted = convert_t5x_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only: bool = False , scalable_attention: bool = False , ):
    '''Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint.'''
    config = MT5Config.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from the T5X checkpoint
    load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print("""Done""" )
if __name__ == "__main__":
__lowerCAmelCase : List[str] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__lowerCAmelCase : Union[str, Any] = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
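# A hypothetical invocation of this converter (the flags come from the argparse
# block above; the paths are placeholders of my own, not from the original):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./umt5-converted \
#       --scalable_attention
#
# --scalable_attention keeps one relative-position-bias table per layer (the
# UMT5 layout) instead of copying only layer 0's table to every layer.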
| 58
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A ( unittest.TestCase ):
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=3_0 , snake_case_=4_0_0 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=1 / 2_5_5 , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=True , ) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_a = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
_a = parent
_a = batch_size
_a = num_channels
_a = min_resolution
_a = max_resolution
_a = do_resize
_a = size
_a = do_rescale
_a = rescale_factor
_a = do_normalize
_a = image_mean
_a = image_std
_a = do_pad
def __lowerCAmelCase ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCAmelCase ( self , snake_case_ , snake_case_=False ) -> Union[str, Any]:
if not batched:
_a = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
_a , _a = image.size
else:
_a , _a = image.shape[1], image.shape[2]
if w < h:
_a = int(self.size["shortest_edge"] * h / w )
_a = self.size["shortest_edge"]
elif w > h:
_a = self.size["shortest_edge"]
_a = int(self.size["shortest_edge"] * w / h )
else:
_a = self.size["shortest_edge"]
_a = self.size["shortest_edge"]
else:
_a = []
for image in image_inputs:
_a , _a = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_a = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
_a = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A ( a , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = DetrImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = DetrImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> List[Any]:
_a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , "image_mean" ) )
self.assertTrue(hasattr(snake_case_ , "image_std" ) )
self.assertTrue(hasattr(snake_case_ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case_ , "do_rescale" ) )
self.assertTrue(hasattr(snake_case_ , "rescale_factor" ) )
self.assertTrue(hasattr(snake_case_ , "do_resize" ) )
self.assertTrue(hasattr(snake_case_ , "size" ) )
self.assertTrue(hasattr(snake_case_ , "do_pad" ) )
def __lowerCAmelCase ( self ) -> int:
_a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , snake_case_ )
_a = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=snake_case_ )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , snake_case_ )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
def __lowerCAmelCase ( self ) -> Any:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a , _a = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
_a = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> str:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(snake_case_ , return_tensors="pt" ).pixel_values
_a , _a = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
# prepare image and target
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
_a = json.loads(f.read() )
_a = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
_a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
_a = image_processing(images=snake_case_ , annotations=snake_case_ , return_tensors="pt" )
# verify pixel values
_a = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , snake_case_ )
_a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
_a = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case_ ) )
# verify boxes
_a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case_ )
_a = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
_a = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case_ ) )
# verify is_crowd
_a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case_ ) )
# verify class_labels
_a = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case_ ) )
# verify orig_size
_a = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case_ ) )
# verify size
_a = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case_ ) )
@slow
def __lowerCAmelCase ( self ) -> int:
# prepare image, target and masks_path
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
_a = json.loads(f.read() )
_a = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
_a = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
_a = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
_a = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors="pt" )
# verify pixel values
_a = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , snake_case_ )
_a = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , snake_case_ , atol=1E-4 ) )
# verify area
_a = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , snake_case_ ) )
# verify boxes
_a = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , snake_case_ )
_a = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , snake_case_ , atol=1E-3 ) )
# verify image_id
_a = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , snake_case_ ) )
# verify is_crowd
_a = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , snake_case_ ) )
# verify class_labels
_a = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , snake_case_ ) )
# verify masks
_a = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , snake_case_ )
# verify orig_size
_a = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , snake_case_ ) )
# verify size
_a = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , snake_case_ ) )
| 131
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clap'''] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure['''feature_extraction_clap'''] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 721
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig ):
    '''Configuration class to store the configuration of a RoFormer model.'''
    model_type = "roformer"
    def __init__( self, vocab_size=50_000, embedding_size=None, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1_536, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, rotary_value=False, use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig ):
    '''ONNX export configuration for RoFormer.'''
    @property
    def inputs(self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
                ("""token_type_ids""", dynamic_axis),
            ] )
| 28
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin ):
    '''Wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.'''
    feature_extractor_class = '''ClapFeatureExtractor'''
    tokenizer_class = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self ,feature_extractor ,tokenizer ) -> None:
        super().__init__(feature_extractor ,tokenizer )
    def __call__( self ,text=None ,audios=None ,return_tensors=None ,**kwargs ):
        sampling_rate = kwargs.pop('sampling_rate' ,None )
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text ,return_tensors=return_tensors ,**kwargs )
        if audios is not None:
            audio_features = self.feature_extractor(
                audios ,sampling_rate=sampling_rate ,return_tensors=return_tensors ,**kwargs )
        if text is not None and audios is not None:
            encoding['''input_features'''] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features ) ,tensor_type=return_tensors )
    def batch_decode( self ,*args ,**kwargs ):
        return self.tokenizer.batch_decode(*args ,**kwargs )
    def decode( self ,*args ,**kwargs ):
        return self.tokenizer.decode(*args ,**kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
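# A minimal usage sketch, kept as comments since it downloads a checkpoint
# ("laion/clap-htsat-unfused" is a published CLAP checkpoint on the Hub; the
# waveform is a dummy one-second clip of my own):
#
#   import numpy as np
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   audio = np.zeros(48_000, dtype=np.float32)  # 1 s at 48 kHz
#   inputs = processor(text=["a dog barking"], audios=audio,
#                      sampling_rate=48_000, return_tensors="pt")
#   # `inputs` now carries the tokenizer keys plus "input_features".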
| 67
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class snake_case__ ( unittest.TestCase ):
def __init__( self : Dict , lowercase : int , lowercase : Dict=7 , lowercase : Optional[int]=3 , lowercase : Any=18 , lowercase : Tuple=30 , lowercase : List[str]=4_00 , lowercase : Dict=True , lowercase : Dict=None , lowercase : List[str]=True , lowercase : List[str]=None , lowercase : Tuple=True , lowercase : List[str]=[0.5, 0.5, 0.5] , lowercase : Dict=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
UpperCAmelCase : Tuple = size if size is not None else {"shortest_edge": 18}
UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : List[str] = batch_size
UpperCAmelCase : Optional[Any] = num_channels
UpperCAmelCase : Tuple = image_size
UpperCAmelCase : List[Any] = min_resolution
UpperCAmelCase : Optional[int] = max_resolution
UpperCAmelCase : List[Any] = do_resize
UpperCAmelCase : Dict = size
UpperCAmelCase : Tuple = do_center_crop
UpperCAmelCase : Optional[int] = crop_size
UpperCAmelCase : int = do_normalize
UpperCAmelCase : Tuple = image_mean
UpperCAmelCase : Tuple = image_std
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = LevitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase : Dict = LevitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "image_mean" ) )
self.assertTrue(hasattr(lowercase , "image_std" ) )
self.assertTrue(hasattr(lowercase , "do_normalize" ) )
self.assertTrue(hasattr(lowercase , "do_resize" ) )
self.assertTrue(hasattr(lowercase , "do_center_crop" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def __lowerCAmelCase ( self : Any ):
'''simple docstring'''
pass
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase : Any = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase : Optional[int] = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
UpperCAmelCase : Dict = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 595
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class AudioClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((34_000,))
        audio2 = np.zeros((14_000,))
        return audio_classifier, [audio2, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        audio2, audio = examples
        output = audio_classifier(audio)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        output = audio_classifier(audio, top_k=1)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        audio = dataset[0]["audio"]["array"]
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"score": ANY(float), "label": ANY(str)},
                {"score": ANY(float), "label": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = "anton-l/wav2vec2-random-tiny-classifier"
        audio_classifier = pipeline("audio-classification", model=model)
        audio = np.ones((8_000,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {"score": 0.0842, "label": "no"},
            {"score": 0.0838, "label": "up"},
            {"score": 0.0837, "label": "go"},
            {"score": 0.0834, "label": "right"},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {"score": 0.0845, "label": "stop"},
            {"score": 0.0844, "label": "on"},
            {"score": 0.0841, "label": "right"},
            {"score": 0.0834, "label": "left"},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {"array": np.ones((8_000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = "superb/wav2vec2-base-superb-ks"
        audio_classifier = pipeline("audio-classification", model=model)
        dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test")
        audio = np.array(dataset[3]["speech"], dtype=np.float32)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"score": 0.981, "label": "go"},
                {"score": 0.007, "label": "up"},
                {"score": 0.006, "label": "_unknown_"},
                {"score": 0.001, "label": "down"},
            ],
        )

    @require_tf
    @unittest.skip("Audio classification is not implemented for TF")
    def test_small_model_tf(self):
        pass
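
# Usage sketch (illustrative, not part of the test suite above): the pipeline
# exercised by these tests can also be driven directly on a raw 1-D numpy
# waveform. The tiny model id is the one used in test_small_model_pt.
if __name__ == "__main__":
    demo_classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
    demo_waveform = np.zeros((8_000,), dtype=np.float32)
    print(demo_classifier(demo_waveform, top_k=2))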
| 291
|
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bbaa(8, seed=0)}")

    from doctest import testmod

    testmod()
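
# Minimal sketch of the sifting step in isolation (no qiskit needed): Alice
# and Bob keep a measured bit exactly when their bases match, which is what
# the list comprehension inside bbaa() does.
if __name__ == "__main__":
    demo_alice_basis = [0, 1, 1, 0, 1]
    demo_bob_basis = [0, 0, 1, 1, 1]
    demo_result = "10110"
    demo_sifted = "".join(
        bit
        for a, b, bit in zip(demo_alice_basis, demo_bob_basis, demo_result)
        if a == b
    )
    assert demo_sifted == "110"  # bases agree at positions 0, 2 and 4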
| 291
| 1
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the Easter date for a given year with Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )
if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
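
    # Quick sanity check (a minimal sketch): 23 April 2000 is the known
    # Western Easter Sunday, so the computus above should agree.
    assert gauss_easter(2000) == datetime(2000, 4, 23)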
| 664
|
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 606
| 0
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)

# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded Difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
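
# Cross-check sketch (assumes only numpy): on a shared universe, the skfuzzy
# union and intersection used above are elementwise max and min, so they
# should match np.maximum / np.minimum exactly.
assert np.allclose(union, np.maximum(young, middle_aged))
assert np.allclose(intersection, np.minimum(young, middle_aged))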
| 337
|
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
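

# Worked example (a small sketch): 10! = 3628800 and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) should return 27.
assert solution(10) == 27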
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 337
| 1
|
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 70
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(
    dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
    """For each vector in value_array, find the closest vector in dataset."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Calculates the cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
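

# Usage sketch with toy data: the closest dataset vector to [0, 1] is [0, 0]
# at euclidean distance 1.0 (ties keep the earlier vector).
if __name__ == "__main__":
    demo_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    demo_values = np.array([[0.0, 1.0]])
    assert similarity_search(demo_dataset, demo_values) == [[[0.0, 0.0], 1.0]]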
if __name__ == "__main__":
import doctest
doctest.testmod()
| 105
| 0
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Pipeline that returns the hidden states computed by the base model."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # model_outputs[0] is the first available tensor (logits or last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
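

# Usage sketch (the checkpoint name is an illustrative assumption; any encoder
# model works): pipeline("feature-extraction") returns nested lists shaped
# [batch, sequence_length, hidden_size].
if __name__ == "__main__":
    from transformers import pipeline

    extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
    features = extractor("Hello world")
    print(len(features[0]), len(features[0][0]))  # tokens x hidden_size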
| 704
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
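

# Usage sketch: decrypt a Caesar-shifted string. The chi squared statistic
# needs a reasonably long ciphertext to be reliable, so very short inputs may
# select the wrong shift.
if __name__ == "__main__":
    demo_shift, demo_chi, demo_plaintext = decrypt_caesar_with_chi_squared("crybd cdbsxq")
    print(demo_shift, demo_chi, demo_plaintext)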
| 5
| 0
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 37
|
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase :Optional[int] = get_tests_dir('''fixtures''')
lowerCAmelCase :Any = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCAmelCase :Tuple = get_tests_dir('''fixtures/dummy-config.json''')
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : str = 0
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
__magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ : Union[str, Any] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__magic_name__ : Dict = AutoFeatureExtractor.from_pretrained(_A ).to_dict()
config_dict.pop('feature_extractor_type' )
__magic_name__ : int = WavaVecaFeatureExtractor(**_A )
# save in new folder
model_config.save_pretrained(_A )
config.save_pretrained(_A )
__magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A )
# make sure private variable is not incorrectly saved
__magic_name__ : List[str] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
__magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
with self.assertRaisesRegex(
_A , 'bert-base is not a local folder and is not a valid model identifier' ):
__magic_name__ : str = AutoFeatureExtractor.from_pretrained('bert-base' )
def __lowerCAmelCase ( self : Any ) -> Tuple:
with self.assertRaisesRegex(
_A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A , revision='aaaaaa' )
def __lowerCAmelCase ( self : Dict ) -> str:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A ):
__magic_name__ : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
__magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
__magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
__magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def __lowerCAmelCase ( self : str ) -> Tuple:
try:
AutoConfig.register('custom' , _A )
AutoFeatureExtractor.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoFeatureExtractor.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
__magic_name__ : str = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
__magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Any = True
try:
AutoConfig.register('custom' , _A )
AutoFeatureExtractor.register(_A , _A )
# If remote code is not set, the default is to use local
__magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__magic_name__ : str = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_A , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 561
| 0
|
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to the given precision using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # getcontext().prec is restored here from the unused import above; the
    # Decimal arithmetic must run at the requested working precision.
    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
| 712
|
'''simple docstring'''
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_a: float, radius_b: float, height: float) -> float:
    if radius_a < 0 or radius_b < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values"
        )
    slant_height = (height**2 + (radius_a - radius_b) ** 2) ** 0.5
    return pi * ((slant_height * (radius_a + radius_b)) + radius_a**2 + radius_b**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori"
        )
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side_a: float, side_b: float, side_c: float) -> float:
    if side_a < 0 or side_b < 0 or side_c < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side_a + side_b < side_c or side_a + side_c < side_b or side_b + side_c < side_a:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side_a + side_b + side_c) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_a)
        * (semi_perimeter - side_b)
        * (semi_perimeter - side_c)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_a: float, base_b: float, height: float) -> float:
    if base_a < 0 or base_b < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_a + base_b) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_a: float, diagonal_b: float) -> float:
    if diagonal_a < 0 or diagonal_b < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_a * diagonal_b


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or "
            "equal to three as number of sides"
        )
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as length of a side"
        )
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('''\nSurface Areas of various geometric shapes: \n''')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 98
| 0
|
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force given in polar coordinates into [x, y] components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[floataa], location: NDArray[floataa], eps: float = 10**-1
) -> bool:
    """Check that the net moment of the forces about their locations is zero."""
    moments: NDArray[floataa] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
# Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 587
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : List[str] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
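

# Usage sketch: default hyperparameters and the ONNX input axes defined above.
if __name__ == "__main__":
    config = CamembertConfig()
    print(config.vocab_size, config.hidden_size)  # 30522 768
    onnx_config = CamembertOnnxConfig(config)
    print(list(onnx_config.inputs))  # ['input_ids', 'attention_mask']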
| 587
| 1
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __UpperCamelCase ( self ) ->List[str]:
'''simple docstring'''
__a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase ) )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
__a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase ) )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
__a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase ) )
def __UpperCamelCase ( self ) ->Optional[Any]:
'''simple docstring'''
__a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase ) )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
__a = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase ) )
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
__a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __UpperCamelCase ( self ) ->str:
'''simple docstring'''
__a = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __UpperCamelCase ( self ) ->List[Any]:
'''simple docstring'''
# pass variant but use the non-variant filenames
__a = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
__a = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __UpperCamelCase ( self ) ->Union[str, Any]:
'''simple docstring'''
__a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __UpperCamelCase ( self ) ->Dict:
'''simple docstring'''
__a = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
__a = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __UpperCamelCase ( self ) ->Tuple:
'''simple docstring'''
# pass variant but use the non-variant filenames
__a = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
__a = 'fp16'
self.assertTrue(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
def __UpperCamelCase ( self ) ->int:
'''simple docstring'''
__a = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
__a = 'fp16'
self.assertFalse(is_safetensors_compatible(lowerCamelCase , variant=lowerCamelCase ) )
| 270
|
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __UpperCamelCase ( self ) ->Any:
'''simple docstring'''
# if
__a = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
__a = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
__a , __a = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__a = None
__a = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__a = IFImgaImgPipeline(**pipe_a.components )
__a = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__a = IFInpaintingPipeline(**pipe_a.components )
__a = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->List[Any]:
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe_a(
prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , num_inference_steps=2 , generator=lowerCamelCase , output_type='np' , )
__a = output.images[0]
assert image.shape == (64, 64, 3)
__a = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
# pipeline 2
_start_torch_memory_measurement()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase )
__a = pipe_a(
prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , image=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=2 , output_type='np' , )
__a = output.images[0]
assert image.shape == (256, 256, 3)
__a = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Any:
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase )
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe_a(
prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , image=lowerCamelCase , num_inference_steps=2 , generator=lowerCamelCase , output_type='np' , )
__a = output.images[0]
assert image.shape == (64, 64, 3)
__a = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
# pipeline 2
_start_torch_memory_measurement()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCamelCase )
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase )
__a = pipe_a(
prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , image=lowerCamelCase , original_image=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=2 , output_type='np' , )
__a = output.images[0]
assert image.shape == (256, 256, 3)
__a = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->Optional[int]:
'''simple docstring'''
# pipeline 1
_start_torch_memory_measurement()
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase )
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCamelCase )
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = pipe_a(
prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , num_inference_steps=2 , generator=lowerCamelCase , output_type='np' , )
__a = output.images[0]
assert image.shape == (64, 64, 3)
__a = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
# pipeline 2
_start_torch_memory_measurement()
__a = torch.Generator(device='cpu' ).manual_seed(0 )
__a = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase )
__a = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCamelCase )
__a = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCamelCase )
__a = pipe_a(
prompt_embeds=lowerCamelCase , negative_prompt_embeds=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , original_image=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=2 , output_type='np' , )
__a = output.images[0]
assert image.shape == (256, 256, 3)
__a = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
__a = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def _start_torch_memory_measurement() -> None:
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 270
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
def lowerCamelCase_ ( self ) -> Tuple:
super().setUp()
_UpperCAmelCase = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self ) -> Any:
return CanineTokenizer.from_pretrained('google/canine-s' )
def lowerCamelCase_ ( self , **snake_case ) -> CanineTokenizer:
_UpperCAmelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case )
_UpperCAmelCase = 1024
return tokenizer
@require_torch
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.canine_tokenizer
_UpperCAmelCase = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
# fmt: off
_UpperCAmelCase = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
_UpperCAmelCase = tokenizer(snake_case , padding=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
_UpperCAmelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(snake_case , snake_case )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.canine_tokenizer
_UpperCAmelCase = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
_UpperCAmelCase = tokenizer(snake_case , padding=snake_case , return_tensors='pt' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('input_ids' , snake_case )
self.assertIn('attention_mask' , snake_case )
self.assertIn('token_type_ids' , snake_case )
@require_torch
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.canine_tokenizer
_UpperCAmelCase = [
'What\'s the weater?',
'It\'s about 25 degrees.',
]
_UpperCAmelCase = tokenizer(
text_target=snake_case , max_length=32 , padding='max_length' , truncation=snake_case , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def lowerCamelCase_ ( self ) -> Tuple:
# safety check on max_len default value so we are sure the test works
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_UpperCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = ' He is very happy, UNwant\u00E9d,running'
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
tokenizer.save_pretrained(snake_case )
_UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case )
_UpperCAmelCase = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
shutil.rmtree(snake_case )
_UpperCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = ' He is very happy, UNwant\u00E9d,running'
_UpperCAmelCase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_UpperCAmelCase = chr(0xe_0_0_7 )
additional_special_tokens.append(snake_case )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
tokenizer.save_pretrained(snake_case )
_UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case )
_UpperCAmelCase = after_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
self.assertIn(snake_case , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_UpperCAmelCase = tokenizer.__class__.from_pretrained(snake_case , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.get_tokenizers(do_lower_case=snake_case )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
_UpperCAmelCase , _UpperCAmelCase = self.get_clean_sequence(snake_case )
# a special token for Canine can be defined as follows:
_UpperCAmelCase = 0xe_0_0_5
_UpperCAmelCase = chr(snake_case )
tokenizer.add_special_tokens({'cls_token': special_token} )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertEqual(len(snake_case ) , 1 )
_UpperCAmelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertEqual(snake_case , input_encoded + special_token_id )
_UpperCAmelCase = tokenizer.decode(snake_case , skip_special_tokens=snake_case )
self.assertTrue(special_token not in decoded )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
@require_tokenizers
def lowerCamelCase_ ( self ) -> List[str]:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)
                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
def lowerCamelCase_ ( self ) -> str:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)
                special_tokens_map['additional_special_tokens'] = [new_token_1]
                tokenizer_config['additional_special_tokens'] = [new_token_1]
                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1], tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])), )
                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0)
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2])))
@require_tokenizers
def lowerCamelCase_ ( self ) -> Union[str, Any]:
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                input = 'hello world'
                if self.space_between_special_tokens:
                    output = '[CLS] hello world [SEP]'
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
def lowerCamelCase_ ( self ) -> Dict:
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                token_to_test_setters = 'a'
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)
                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)
                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, 'additional_special_tokens_ids', [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [additional_special_token_id])
def lowerCamelCase_ ( self ) -> List[str]:
pass
def lowerCamelCase_ ( self ) -> Tuple:
pass
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> List[str]:
pass
def lowerCamelCase_ ( self ) -> Optional[Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
pass
def lowerCamelCase_ ( self ) -> Optional[Any]:
pass
| 573
|
"""simple docstring"""
g = 9.80_665
def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    '''Compute the buoyant force on an object fully submerged in a fluid.'''
    if fluid_density <= 0:
        raise ValueError('Impossible fluid density')
    if volume < 0:
        raise ValueError('Impossible Object volume')
    if gravity <= 0:
        raise ValueError('Impossible Gravity')
    return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
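    # Added illustration (values assumed, not from the original file): the
    # buoyant force on a fully submerged 0.5 m^3 object in fresh water
    # (density ~1000 kg/m^3) is rho * g * V.
    demo_force = archimedes_principle(fluid_density=1000, volume=0.5)
    print(f"Buoyant force on 0.5 m^3 in water: {demo_force:.1f} N")  # ~4903.3 N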
| 573
| 1
|
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    '''Generate an RSA key pair and write it to disk.'''
    print("""Making key files...""" )
    make_key_files("""rsa""" , 1024 )
    print("""Key files generation successful.""" )
def generate_key( key_size: int ) -> tuple[tuple[int, int], tuple[int, int]]:
    '''Generate an RSA public/private key pair of the given bit size.'''
    print("""Generating prime p...""" )
    p = rabinMiller.generate_large_prime(key_size )
    print("""Generating prime q...""" )
    q = rabinMiller.generate_large_prime(key_size )
    n = p * q
    print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
    while True:
        e = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
        if cryptoMath.gcd(e , (p - 1) * (q - 1) ) == 1:
            break
    print("""Calculating d that is mod inverse of e...""" )
    d = cryptoMath.find_mod_inverse(e , (p - 1) * (q - 1) )
    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)
def make_key_files( name: str , key_size: int ) -> None:
    '''Write the generated key pair to "<name>_pubkey.txt" and "<name>_privkey.txt".'''
    if os.path.exists(F"""{name}_pubkey.txt""" ) or os.path.exists(F"""{name}_privkey.txt""" ):
        print("""\nWARNING:""" )
        print(
            F"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            """Use a different name or delete these files and re-run this program.""" )
        sys.exit()
    public_key , private_key = generate_key(key_size )
    print(F"""\nWriting public key to file {name}_pubkey.txt...""" )
    with open(F"""{name}_pubkey.txt""" , """w""" ) as out_file:
        out_file.write(F"""{key_size},{public_key[0]},{public_key[1]}""" )
    print(F"""Writing private key to file {name}_privkey.txt...""" )
    with open(F"""{name}_privkey.txt""" , """w""" ) as out_file:
        out_file.write(F"""{key_size},{private_key[0]},{private_key[1]}""" )
if __name__ == "__main__":
main()
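    # Added sketch of how the generated key pair is used (illustrative only:
    # textbook RSA without padding must not be used for real messages):
    # (n, e), (n, d) = generate_key(1024)
    # ciphertext = pow(42, e, n)            # encrypt with the public key
    # assert pow(ciphertext, d, n) == 42    # decrypt with the private key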
| 566
|
'''simple docstring'''
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact( artifact_path , targets ):
    '''Extract warnings from a downloaded artifact (in .zip format).'''
    selected_warnings = set()
    buffer = []
    def parse_line(fp ):
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("""UTF-8""" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(""" """ ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = """\n""".join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(F""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )
    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                F"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
    return selected_warnings
def extract_warnings( artifact_dir , targets ):
    '''Extract warnings from all artifact files.'''
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(""".zip""" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings
if __name__ == "__main__":
    def list_str( values ):
        '''Parse a comma-separated CLI value into a list.'''
        return values.split(""",""" )
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print('''=''' * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
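    # Added sanity note on the selection rule in `extract_warnings_from_single_artifact`
    # (the sample string below is made up): a buffered warning matches when its
    # text contains ": <Target>: ", e.g.
    #   any(f": {x}: " in "src/foo.py:12: DeprecationWarning: bar" for x in ["DeprecationWarning"])  # -> True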
| 566
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] =logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str ={
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig ):
    model_type = '''decision_transformer'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=1_28 , max_ep_len=40_96 , action_tanh=True , vocab_size=1 , n_positions=10_24 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
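# Added usage sketch (assumes this file's place in the transformers package):
# the attribute_map above lets generic code query GPT-2 style names, e.g.
#   DecisionTransformerConfig().num_hidden_layers  # resolves to .n_layer == 3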
| 428
|
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    """Render a benchmark-results JSON file as a collapsible Markdown report."""
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)
    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append('</details>')
    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
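# Added example (assumed input, for illustration): given an input JSON of
#   {"benchmarks/infer": {"latency_ms": {"new": 8.1, "old": 9.0, "diff": -0.9}}}
# the script writes a collapsible section containing:
#   ### Benchmark: infer
#   | metric | latency_ms |
#   |--------|---|
#   | new / old (diff) | 8.100000 / 9.000000 (-0.900000) |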
| 405
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ : Optional[Any] = logging.get_logger(__name__)
def _snake_case ( UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple=False ):
A__ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""deit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""deit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""deit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""deit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""deit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""deit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""deit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""deit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""deit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""deit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """deit.embeddings.cls_token"""),
("""dist_token""", """deit.embeddings.distillation_token"""),
("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """deit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
A__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("""norm.weight""", """deit.layernorm.weight"""),
("""norm.bias""", """deit.layernorm.bias"""),
("""head.weight""", """cls_classifier.weight"""),
("""head.bias""", """cls_classifier.bias"""),
("""head_dist.weight""", """distillation_classifier.weight"""),
("""head_dist.bias""", """distillation_classifier.bias"""),
] )
return rename_keys
def _snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=False ):
for i in range(config.num_hidden_layers ):
if base_model:
A__ = """"""
else:
A__ = """deit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A__ = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
A__ = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
A__ = in_proj_weight[
: config.hidden_size, :
]
A__ = in_proj_bias[: config.hidden_size]
A__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A__ = in_proj_weight[
-config.hidden_size :, :
]
A__ = in_proj_bias[-config.hidden_size :]
def _snake_case ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple ):
A__ = dct.pop(UpperCAmelCase_ )
A__ = val
def _snake_case ( ):
A__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def _snake_case ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ):
A__ = DeiTConfig()
# all deit models have fine-tuned heads
A__ = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
A__ = 1000
A__ = """huggingface/label-files"""
A__ = """imagenet-1k-id2label.json"""
A__ = json.load(open(hf_hub_download(UpperCAmelCase_ , UpperCAmelCase_ , repo_type="""dataset""" ) , """r""" ) )
A__ = {int(UpperCAmelCase_ ): v for k, v in idalabel.items()}
A__ = idalabel
A__ = {v: k for k, v in idalabel.items()}
A__ = int(deit_name[-6:-4] )
A__ = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("""tiny""" ):
A__ = 192
A__ = 768
A__ = 12
A__ = 3
elif deit_name[9:].startswith("""small""" ):
A__ = 384
A__ = 1536
A__ = 12
A__ = 6
if deit_name[9:].startswith("""base""" ):
pass
elif deit_name[4:].startswith("""large""" ):
A__ = 1024
A__ = 4096
A__ = 24
A__ = 16
# load original model from timm
A__ = timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
A__ = timm_model.state_dict()
A__ = create_rename_keys(UpperCAmelCase_ , UpperCAmelCase_ )
for src, dest in rename_keys:
rename_key(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
read_in_q_k_v(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# load HuggingFace model
A__ = DeiTForImageClassificationWithTeacher(UpperCAmelCase_ ).eval()
model.load_state_dict(UpperCAmelCase_ )
# Check outputs on an image, prepared by DeiTImageProcessor
A__ = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
A__ = DeiTImageProcessor(size=UpperCAmelCase_ , crop_size=config.image_size )
A__ = image_processor(images=prepare_img() , return_tensors="""pt""" )
A__ = encoding["""pixel_values"""]
A__ = model(UpperCAmelCase_ )
A__ = timm_model(UpperCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(UpperCAmelCase_ , outputs.logits , atol=1e-3 )
Path(UpperCAmelCase_ ).mkdir(exist_ok=UpperCAmelCase_ )
print(F"""Saving model {deit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
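# Added example invocation (assuming this script is saved as
# convert_deit_checkpoint.py; the flags are the ones defined above):
#   python convert_deit_checkpoint.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled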
| 500
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : Optional[Any] = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "segformer"
    def __init__( self , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[32, 64, 1_60, 2_56] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , num_attention_heads=[1, 2, 5, 8] , mlp_ratios=[4, 4, 4, 4] , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , drop_path_rate=0.1 , layer_norm_eps=1e-6 , decoder_hidden_size=2_56 , semantic_loss_ignore_index=2_55 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                """Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
                """ removed, as the behaviour will default to that of reshape_last_stage = True.""" , FutureWarning , )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("""reshape_last_stage""" , True )
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class SegformerOnnxConfig( OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11" )
    @property
    def inputs( self ):
        """simple docstring"""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-4
    @property
    def default_onnx_opset( self ):
        """simple docstring"""
        return 12
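# Added usage sketch (names follow this module): with the default config the
# ONNX export declares a single dynamic-shaped input, i.e.
#   SegformerOnnxConfig(SegformerConfig()).inputs
#   # OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])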
| 500
| 1
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
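    # Added worked example (illustrative numbers): a load drawing 100 VA at
    # power factor 0.8 dissipates real_power(100, 0.8) == 80.0 W of real power
    # and carries reactive_power(100, 0.8) ~= 60.0 var, since sqrt(1 - 0.8**2) = 0.6.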
| 25
|
from manim import *
class __magic_name__ ( A__ ):
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
'''simple docstring'''
UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = Text("CPU" , font_size=24 )
UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCamelCase__ )
UpperCAmelCase = [mem.copy() for i in range(1 )]
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = Text("GPU" , font_size=24 )
UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
gpu.align_to(UpperCamelCase__ , UpperCamelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(UpperCamelCase__ )
UpperCAmelCase = [mem.copy() for i in range(6 )]
UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
UpperCAmelCase = Text("Model" , font_size=24 )
UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , )
UpperCAmelCase = MarkupText(
F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
UpperCAmelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCAmelCase = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCamelCase__ , run_time=2.5 ) , Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
self.add(UpperCamelCase__ )
UpperCAmelCase = []
UpperCAmelCase = []
UpperCAmelCase = []
for i, rect in enumerate(UpperCamelCase__ ):
UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
cpu_target.move_to(UpperCamelCase__ )
cpu_target.generate_target()
UpperCAmelCase = 0.46 / 4
UpperCAmelCase = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase__ , buff=0.0 )
cpu_targs.append(UpperCamelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase__ ) )
second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
self.play(*UpperCamelCase__ )
self.play(*UpperCamelCase__ )
self.wait()
| 323
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
"""roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}
class RobertaConfig( PretrainedConfig ):
    model_type = "roberta"
    def __init__(self , vocab_size=5_0_2_6_5 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        snake_case = vocab_size
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
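# Added usage sketch (assumes this file's place in the transformers package):
#   config = RobertaConfig(num_hidden_layers=6)   # override any default
#   config.vocab_size                             # -> 50265
# For ONNX export, RobertaOnnxConfig(config).inputs declares dynamic
# batch/sequence axes for `input_ids` and `attention_mask`.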
| 707
|
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    def __init__(self , group: int = 1_4 ) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group" )
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]
        self.__private_key = int(hexlify(urandom(3_2 ) ) , base=1_6 )
    def get_private_key(self ) -> str:
        return hex(self.__private_key )[2:]
    def generate_public_key(self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self , key: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key(self , other_key_str: str ) -> str:
        other_key = int(other_key_str , base=1_6 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("Invalid public key" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int , prime: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str: str , remote_public_key_str: str , group: int = 1_4 ) -> str:
        local_private_key = int(local_private_key_str , base=1_6 )
        remote_public_key = int(remote_public_key_str , base=1_6 )
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("Invalid public key" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
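    # Added end-to-end illustration: two parties agree on a shared secret
    # (group 14 by default; key material is random on every run).
    alice, bob = DiffieHellman(), DiffieHellman()
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared  # both sides derive the same SHA-256 digest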
| 294
| 0
|
def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]
    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0
    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge
        while go_next(i, z_result, input_str):
            z_result[i] += 1
        # if new index's result gives us more right interval,
        # we have to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1
    return z_result
def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]
def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)
    for val in z_result:
        # if value is greater than length of the pattern string
        # that means this index is starting position of substring
        # which is equal to pattern string
        if val >= len(pattern):
            answer += 1
    return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
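    # Added usage illustration: find_pattern counts pattern occurrences via
    # the Z-array of `pattern + input_str`.
    assert find_pattern("abr", "abracadabra") == 2  # matches at offsets 0 and 7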
| 559
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50 # max width of layer names
qname_width = 70 # max width of quantizer names
def lowerCAmelCase( __lowerCamelCase ):
__a = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=__lowerCamelCase , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=__lowerCamelCase , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=__lowerCamelCase , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=__lowerCamelCase , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=__lowerCamelCase , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=__lowerCamelCase , type=__lowerCamelCase , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=__lowerCamelCase , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def lowerCAmelCase( __lowerCamelCase ):
if args.calibrator == "max":
__a = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
__a = 'histogram'
elif args.calibrator == "mse":
__a = 'histogram'
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
__a = QuantDescriptor(num_bits=args.aprec , calib_method=__lowerCamelCase )
__a = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(__lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(__lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=False , __lowerCamelCase=False ):
logger.info('Configuring Model for Quantization' )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(__lowerCamelCase , ['embeddings'] , which='weight' , _disabled=__lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(__lowerCamelCase , [''] , _disabled=__lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(__lowerCamelCase , args.quant_disable_keyword , _disabled=__lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(__lowerCamelCase , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=__lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(__lowerCamelCase , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=__lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(__lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(__lowerCamelCase , __lowerCamelCase )
if args.clip_gelu:
clip_gelu(__lowerCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(__lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase ):
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(__lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
def fusea(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
for mod in [qq, qk, qv]:
if not hasattr(__lowerCamelCase , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
__a = qq._amax.detach().item()
__a = qk._amax.detach().item()
__a = qv._amax.detach().item()
__a = max(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
qq._amax.fill_(__lowerCamelCase )
qk._amax.fill_(__lowerCamelCase )
qv._amax.fill_(__lowerCamelCase )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase ):
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
__a = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=__lowerCamelCase )
__a = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def lowerCAmelCase( __lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(__lowerCamelCase , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
__a = mod.weight.shape[0]
__a = mod._weight_quantizer._amax.detach()
__a = torch.ones(__lowerCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def lowerCAmelCase( __lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(__lowerCamelCase , '_weight_quantizer' ):
            if not hasattr(mod._weight_quantizer , '_amax' ):
                print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
                continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
__a = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
__a = set(range(len(mod.weight.size() ) ) ) - axis_set
__a = pytorch_quantization.utils.reduce_amax(mod.weight , axis=__lowerCamelCase , keepdims=__lowerCamelCase ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
__a = amax
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase=25 , __lowerCamelCase=180 , __lowerCamelCase=None ):
if ignore is None:
__a = []
elif not isinstance(__lowerCamelCase , __lowerCamelCase ):
__a = [ignore]
__a = 0
for name, mod in model.named_modules():
if not hasattr(__lowerCamelCase , 'weight' ):
continue
__a = max(__lowerCamelCase , len(__lowerCamelCase ) )
for name, mod in model.named_modules():
__a = getattr(__lowerCamelCase , '_input_quantizer' , __lowerCamelCase )
__a = getattr(__lowerCamelCase , '_weight_quantizer' , __lowerCamelCase )
if not hasattr(__lowerCamelCase , 'weight' ):
continue
if type(__lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(__lowerCamelCase ) is str and s in name]:
continue
__a = f'''Act:{input_q.extra_repr()}'''
__a = f'''Wgt:{weight_q.extra_repr()}'''
__a = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(__lowerCamelCase ) <= line_width:
logger.info(__lowerCamelCase )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def lowerCAmelCase( __lowerCamelCase ):
__a = 0
for name, mod in model.named_modules():
if isinstance(__lowerCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
__a = getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(__lowerCamelCase , __lowerCamelCase )
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="both" , **__lowerCamelCase ):
__a = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(__lowerCamelCase , __lowerCamelCase , '_input_quantizer' , __lowerCamelCase , __lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(__lowerCamelCase , __lowerCamelCase , '_weight_quantizer' , __lowerCamelCase , __lowerCamelCase )
logger.info(__lowerCamelCase )
def lowerCAmelCase( __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ):
for name, mod in model.named_modules():
if hasattr(__lowerCamelCase , '_input_quantizer' ) or hasattr(__lowerCamelCase , '_weight_quantizer' ):
for n in names:
if re.search(__lowerCamelCase , __lowerCamelCase ):
set_quantizers(__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(__lowerCamelCase , __lowerCamelCase ):
__a = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
logger.info(__lowerCamelCase )
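# Added sketch of the intended calibration workflow (the helper names below are
# the ones used in the upstream quantization-qdqbert example this module mirrors;
# the calibration DataLoader is assumed):
#   set_default_quantizers(args)             # choose calibrators before model creation
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   for batch in calib_dataloader:           # run data to collect activation ranges
#       model(**batch)
#   finish_calibration(model, args)          # load amax values, re-enable quantization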
| 559
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = """bert"""
else:
raise ValueError("""args.model_type should be \"bert\".""")
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[f'{prefix}.embeddings.{w}.weight']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
]
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
]
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
]
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
]
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
]
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
]
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
]
lowerCamelCase__ = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
]
std_idx += 1
lowerCamelCase__ = state_dict["""cls.predictions.decoder.weight"""]
lowerCamelCase__ = state_dict["""cls.predictions.bias"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[f'cls.predictions.transform.dense.{w}']
lowerCamelCase__ = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
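    # Added sketch of how the dump is typically consumed (the student class and
    # its config are assumptions, not part of this script):
    #   student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    #   student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)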
| 708
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()

        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported, not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
| 549
| 0
|
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    '''simple docstring'''
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
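# A small self-check of the justification logic above (expected values worked
# out by hand for a 16-character line width):
if __name__ == "__main__":
    assert text_justification("This is an example of text justification.", 16) == [
        "This    is    an",
        "example  of text",
        "justification.  ",
    ]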
if __name__ == "__main__":
from doctest import testmod
testmod()
| 389
|
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
    def test_multi_gpu_data_parallel_forward(self):
pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
    def test_attention_outputs(self):
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def _lowercase ( self : Any ) -> List[Any]:
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 389
| 1
|
"""simple docstring"""
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
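# Usage sketch: for a symmetric matrix the returned value approximates the
# dominant eigenvalue. For example,
#   power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
# converges to roughly (3.0, [0.707..., 0.707...]).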
def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 716
|
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
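    # Worked example: for the undirected graph with edges 0-1 (w=1), 1-2 (w=2)
    # and 0-2 (w=4), entering "3", then "0 1 1", "1 2 2", "0 2 4" prints the
    # minimum spanning tree edges [(0, 1), (1, 2)].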
| 22
| 0
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    """simple docstring"""

    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """simple docstring"""

    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """simple docstring"""

    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """simple docstring"""

    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
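# Hedged usage sketch of the pipelines above via the high-level factory (the
# checkpoint name is only an example; any seq2seq checkpoint works):
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   summarizer("A long article ...", max_length=56)[0]["summary_text"]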
| 81
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__snake_case = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_1percent_close(source, target):
    """simple docstring"""
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    """simple docstring"""
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2_351_563,
"""num_examples""": 10_000,
},
{
"""name""": """validation""",
"""num_bytes""": 238_418,
"""num_examples""": 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
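# To exercise this integration test in isolation (test path assumed), something like:
#   pytest -m integration -k test_test_command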
| 386
| 0
|
'''simple docstring'''
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """simple docstring"""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """simple docstring"""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))
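# For a Hermitian matrix A and nonzero vector v, the Rayleigh quotient
# R(A, v) = (v* A v) / (v* v) is always real and lies between the smallest and
# largest eigenvalues of A; this is the quantity `rayleigh_quotient` computes.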
def tests() -> None:
    """simple docstring"""
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'''{a} is not hermitian.'''
    assert rayleigh_quotient(a, v) == float(3)
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
| 701
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class A_ :
"""simple docstring"""
def __init__( self :Dict ) -> List[str]:
'''simple docstring'''
snake_case_ : int = {}
def _A ( self :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=1 ) -> Any:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case_ : Optional[int] = [[w, v]]
if not self.graph.get(lowerCAmelCase__ ):
snake_case_ : Dict = []
def _A ( self :List[Any] ) -> Optional[int]:
'''simple docstring'''
return list(self.graph )
def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
def _A ( self :List[str] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :str=-1 ) -> str:
'''simple docstring'''
if s == d:
return []
snake_case_ : str = []
snake_case_ : Optional[int] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Dict = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : str = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Union[str, Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Tuple , lowerCAmelCase__ :int=-1 ) -> int:
'''simple docstring'''
if c == -1:
snake_case_ : Any = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Tuple , lowerCAmelCase__ :Dict=-2 ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : Tuple = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :List[str] , lowerCAmelCase__ :str ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Tuple = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _A ( self :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Tuple , lowerCAmelCase__ :List[str]=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = []
snake_case_ : str = []
if s == -2:
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Optional[int] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowerCAmelCase__ ) != 0:
snake_case_ : int = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return sorted_nodes
def _A ( self :Dict ) -> Any:
'''simple docstring'''
snake_case_ : Dict = []
snake_case_ : Any = []
snake_case_ : str = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Optional[int] = -2
snake_case_ : Any = []
snake_case_ : List[Any] = s
snake_case_ : int = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[str] = s
snake_case_ : Optional[int] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : List[Any] = []
snake_case_ : Tuple = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : str = -2
snake_case_ : List[str] = []
snake_case_ : List[Any] = s
snake_case_ : List[str] = False
snake_case_ : Dict = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Any = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Tuple = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : List[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : int = s
snake_case_ : Union[str, Any] = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Optional[int] , lowerCAmelCase__ :Optional[int]=-2 , lowerCAmelCase__ :Tuple=-1 ) -> str:
'''simple docstring'''
snake_case_ : Optional[int] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : Optional[Any] = time()
return end - begin
def _A ( self :Any , lowerCAmelCase__ :Tuple=-2 ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Any = time()
return end - begin
class A_ :
"""simple docstring"""
def __init__( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = {}
def _A ( self :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=1 ) -> str:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case_ : str = [[w, v]]
# add the other way
if self.graph.get(lowerCAmelCase__ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
snake_case_ : List[str] = [[w, u]]
def _A ( self :Dict , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowerCAmelCase__ )
# the other way round
if self.graph.get(lowerCAmelCase__ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Optional[Any]=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> int:
'''simple docstring'''
if s == d:
return []
snake_case_ : Any = []
snake_case_ : Dict = []
if s == -2:
snake_case_ : Optional[int] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : List[str] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowerCAmelCase__ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : str = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : str = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return visited
def _A ( self :Optional[int] , lowerCAmelCase__ :str=-1 ) -> List[Any]:
'''simple docstring'''
if c == -1:
snake_case_ : Optional[int] = floor(random() * 10_000 ) + 10
for i in range(lowerCAmelCase__ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case_ : str = floor(random() * c ) + 1
if n != i:
self.add_pair(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
def _A ( self :Any , lowerCAmelCase__ :Optional[Any]=-2 ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[str] = deque()
snake_case_ : Optional[Any] = []
if s == -2:
snake_case_ : List[Any] = list(self.graph )[0]
d.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
while d:
snake_case_ : Optional[int] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _A ( self :str , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return len(self.graph[u] )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = []
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : Optional[int] = []
snake_case_ : Tuple = s
snake_case_ : Optional[Any] = False
snake_case_ : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Optional[int] = len(lowerCAmelCase__ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Tuple = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[int] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Optional[Any] = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : List[Any] = s
snake_case_ : Dict = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return list(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : int = []
snake_case_ : List[str] = list(self.graph )[0]
stack.append(lowerCAmelCase__ )
visited.append(lowerCAmelCase__ )
snake_case_ : Tuple = -2
snake_case_ : int = []
snake_case_ : int = s
snake_case_ : Optional[Any] = False
snake_case_ : List[Any] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case_ : Union[str, Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case_ : Tuple = len(lowerCAmelCase__ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case_ : Optional[Any] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case_ : Optional[Any] = True
if len(lowerCAmelCase__ ) != 0:
snake_case_ : Tuple = stack[len(lowerCAmelCase__ ) - 1]
else:
snake_case_ : Optional[int] = False
indirect_parents.append(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = s
snake_case_ : Tuple = ss
            # check if we have reached the starting point
if len(lowerCAmelCase__ ) == 0:
return False
def _A ( self :Any ) -> Tuple:
'''simple docstring'''
return list(self.graph )
def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple=-2 , lowerCAmelCase__ :Optional[int]=-1 ) -> str:
'''simple docstring'''
snake_case_ : List[str] = time()
self.dfs(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = time()
return end - begin
def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any]=-2 ) -> int:
'''simple docstring'''
snake_case_ : List[str] = time()
self.bfs(lowerCAmelCase__ )
snake_case_ : Tuple = time()
return end - begin
| 656
| 0
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """simple docstring"""
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    """simple docstring"""
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 383
|
'''simple docstring'''
import functools
def _lowercase (worda , wordb ):
    '''simple docstring'''
    len_worda = len(worda )
    len_wordb = len(wordb )

    @functools.cache
    def min_distance (indexa , indexb ) -> int:
        # if the first word is exhausted - insert the rest of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if the second word is exhausted - delete the rest of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb] )  # 1 if the current letters differ
        return min(
            1 + min_distance(indexa + 1 , indexb ) ,  # delete from the first word
            1 + min_distance(indexa , indexb + 1 ) ,  # insert into the first word
            diff + min_distance(indexa + 1 , indexb + 1 ) ,  # substitute (or keep)
        )

    return min_distance(0 , 0 )
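
# Worked example (illustrative): turning "kitten" into "sitting" takes 3 edits
# (substitute k->s, substitute e->i, insert g), so _lowercase("kitten", "sitting")
# returns 3.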
if __name__ == "__main__":
import doctest
doctest.testmod()
| 111
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance , reactance , impedance ):
    '''simple docstring'''
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2 ) - pow(reactance, 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2 ) - pow(resistance, 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2 ) + pow(reactance, 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
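
# Worked example: for a series circuit, |Z|^2 = R^2 + X^2, so with resistance 3
# and reactance 4, electrical_impedance(3, 4, 0) returns {"impedance": 5.0}.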
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
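
# Note: timm stores the attention input projection as one fused qkv matrix of
# shape (3 * hidden_size, hidden_size); the three hidden_size-row slices above
# become the separate HF query, key and value weights, in that order.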
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( vit_name , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
    backbone_config = BitConfig(
        global_padding='''same''', layer_type='''bottleneck''', depths=(3, 4, 9), out_features=['''stage3'''], embedding_dynamic_padding=True, )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=3_84, num_labels=10_00 )
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )

    repo_id = '''huggingface/label-files'''
    filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config ).eval()
    else:
        model = ViTHybridForImageClassification(config ).eval()
    model.load_state_dict(state_dict )

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model ) )
    timm_transforms = transform.transforms

    pillow_resamplings = {
        '''bilinear''': PILImageResampling.BILINEAR,
        '''bicubic''': PILImageResampling.BICUBIC,
        '''nearest''': PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True, size={'''shortest_edge''': timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), )

    image = prepare_img()
    timm_pixel_values = transform(image ).unsqueeze(0 )
    pixel_values = processor(image, return_tensors='''pt''' ).pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values )
# verify logits
    with torch.no_grad():
        outputs = model(pixel_values )
        logits = outputs.logits

    print('''Predicted class:''', logits.argmax(-1 ).item() )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1E-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1E-3 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving processor to {pytorch_dump_folder_path}''' )
        processor.save_pretrained(pytorch_dump_folder_path )

    if push_to_hub:
        print(F'''Pushing model and processor to the hub {vit_name}''' )
        model.push_to_hub(F'''ybelkada/{vit_name}''' )
        processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 598
| 0
|
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__SCREAMING_SNAKE_CASE = TypeVar('T')
class SegmentTree(Generic[T] ):
    '''simple docstring'''

    def __init__( self , arr , fnc ):
        any_type : Any | T = None
        self.N : int = len(arr )
        # leaves live at positions N..2N-1; internal node p combines children 2p and 2p+1
        self.st : list[T] = [any_type for _ in range(self.N )] + arr
        self.fn = fnc
        self.build()

    def build( self ):
        # bottom-up pass: fill every internal node from its two children
        for p in range(self.N - 1 , 0 , -1 ):
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def update( self , p , v ):
        # set leaf p to v, then recompute every ancestor on the path to the root
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )

    def query( self , l , r ):  # noqa: E741
        # inclusive range query, folding leaves/subtrees from the outside in
        l, r = l + self.N, r + self.N
        res : T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res , self.st[l] )
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res , self.st[r] )
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
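
# Usage sketch (illustrative): SegmentTree([5, 2, 8], min).query(0, 2) folds the
# three leaves with `min` and returns 2, touching only O(log n) nodes per query.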
if __name__ == "__main__":
from functools import reduce
__SCREAMING_SNAKE_CASE = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__SCREAMING_SNAKE_CASE = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__SCREAMING_SNAKE_CASE = SegmentTree(test_array, min)
__SCREAMING_SNAKE_CASE = SegmentTree(test_array, max)
__SCREAMING_SNAKE_CASE = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() -> None:
    """simple docstring"""
    for i in range(len(test_array ) ):
        for j in range(i , len(test_array ) ):
            min_range = reduce(min , test_array[i : j + 1] )
            max_range = reduce(max , test_array[i : j + 1] )
            sum_range = reduce(lambda a , b : a + b , test_array[i : j + 1] )
            assert min_range == min_segment_tree.query(i , j )
            assert max_range == max_segment_tree.query(i , j )
            assert sum_range == sum_segment_tree.query(i , j )
test_all_segments()
for index, value in test_updates.items():
__SCREAMING_SNAKE_CASE = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 220
|
from __future__ import annotations
class IIRFilter:
    '''simple docstring'''

    def __init__( self , order ):
        self.order = order

        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order

        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients( self , a_coeffs , b_coeffs ):
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected a_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(a_coeffs )}"""
            )
            raise ValueError(error_msg )

        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                F"""Expected b_coeffs to have {self.order + 1} elements """
                F"""for {self.order}-order filter, got {len(b_coeffs )}"""
            )
            raise ValueError(error_msg )

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process( self , sample ):
        # Direct-form I update: y[n] = (b0*x[n] + sum_i (b_i*x[n-i] - a_i*y[n-i])) / a0
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
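
# Minimal usage sketch (the coefficients below are illustrative placeholders, not
# a designed filter):
#   filt = IIRFilter(2)
#   filt.set_coefficients([1.0, -1.1, 0.3], [0.05, 0.1, 0.05])
#   filtered = [filt.process(x) for x in (0.0, 1.0, 1.0, 1.0)]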
| 220
| 1
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_for_image_classification( self ):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset['''train'''][0]['''image'''].convert("RGB")
        inputs = image_processor(image , return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape , expected_shape)

        expected_slice = torch.tensor(
            [-0.4_1_5_8, -0.4_0_9_2, -0.4_3_4_7] , device=torch_device , dtype=torch.float , )
        self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4))
| 713
|
def greatest_common_divisor( a , b ):
    return abs(b ) if a == 0 else greatest_common_divisor(b % a , a )
def gcd_by_iterative( x , y ):
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x , y = y, x % y
    return abs(x )
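
# e.g. greatest_common_divisor(121, 11) == 11 and gcd_by_iterative(24, 36) == 12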
def main():
    try:
        nums = input("Enter two integers separated by comma (,): " ).split("," )
        num_a = int(nums[0] )
        num_b = int(nums[1] )
        print(
            f"greatest_common_divisor({num_a}, {num_b}) = "
            f"{greatest_common_divisor(num_a , num_b )}" )
        print(f"By iterative gcd({num_a}, {num_b}) = {gcd_by_iterative(num_a , num_b )}" )
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input" )
if __name__ == "__main__":
main()
| 206
| 0
|
'''simple docstring'''
def solution() -> int:
    '''simple docstring'''
    constant = []
    i = 1

    while len(constant ) < 1e6:
        constant.append(str(i ) )
        i += 1

    constant = "".join(constant )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
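
# The product above multiplies digits d_1, d_10, d_100, ..., d_1000000 of
# Champernowne's constant 0.123456789101112... (zero-based string indexing,
# hence constant[9] is the 10th digit).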
if __name__ == "__main__":
print(solution())
| 109
|
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    def __init__( self , *, # begin keyword-only arguments
        bos="<s>" , pad="<pad>" , eos="</s>" , unk="<unk>" , extra_special_symbols=None , ):
        '''simple docstring'''
        self.bos_word , self.unk_word , self.pad_word , self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos )
        self.pad_index = self.add_symbol(pad )
        self.eos_index = self.add_symbol(eos )
        self.unk_index = self.add_symbol(unk )
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s )
        self.nspecial = len(self.symbols )
    def __eq__( self , other ):
        '''simple docstring'''
        return self.indices == other.indices
    def __getitem__( self , idx ):
        '''simple docstring'''
        if idx < len(self.symbols ):
            return self.symbols[idx]
        return self.unk_word
def __len__( self : int ):
'''simple docstring'''
return len(self.symbols )
    def __contains__( self , sym ):
        '''simple docstring'''
        return sym in self.indices
    @classmethod
    def load( cls , f ):
        '''simple docstring'''
        d = cls()
        d.add_from_file(f )
        return d
    def add_symbol( self , word , n=1 , overwrite=False ):
        '''simple docstring'''
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols )
            self.indices[word] = idx
            self.symbols.append(word )
            self.count.append(n )
            return idx
    def _load_meta( self , lines ):
        '''simple docstring'''
        return 0
    def add_from_file( self , f ):
        '''simple docstring'''
        if isinstance(f , str ):
            try:
                with open(f , "r" , encoding="utf-8" ) as fd:
                    self.add_from_file(fd )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f ) )
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines )

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" " , 1 )
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" " , 1 )
                else:
                    overwrite = False
                count = int(field )
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word ) )
                self.add_symbol(word , n=count , overwrite=overwrite )
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'" )
def rewrite_dict_keys( d )-> dict:
    '''simple docstring'''
    da = dict((re.sub(r"@@$" , "" , k ), v) if k.endswith("@@" ) else (re.sub(r"$" , "</w>" , k ), v) for k, v in d.items() )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f'{k}</w>']
        da[k] = d[k]  # restore
    return da
def convert_biogpt_checkpoint_to_pytorch( biogpt_checkpoint_path , pytorch_dump_folder_path )-> None:
    '''simple docstring'''
    # prep
    if not os.path.exists(biogpt_checkpoint_path ):
        raise ValueError(f'path {biogpt_checkpoint_path} does not exist!' )
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    print(f'Writing results to {pytorch_dump_folder_path}' )

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path , "checkpoint.pt" )
    if not os.path.isfile(checkpoint_file ):
        raise ValueError(f'path to the file {checkpoint_file} does not exist!' )
    chkpt = torch.load(checkpoint_file , map_location="cpu" )

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path , "dict.txt" )
    if not os.path.isfile(dict_file ):
        raise ValueError(f'path to the file {dict_file} does not exist!' )
    src_dict = Dictionary.load(dict_file )
    src_vocab = rewrite_dict_keys(src_dict.indices )
    src_vocab_size = len(src_vocab )
    src_vocab_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["vocab_file"] )
    print(f'Generating {src_vocab_file} of {src_vocab_size} records' )
    with open(src_vocab_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(src_vocab , ensure_ascii=False , indent=json_indent ) )

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path , "bpecodes" )
    if not os.path.isfile(bpecodes_file ):
        raise ValueError(f'path to the file {bpecodes_file} does not exist!' )
    merges_file = os.path.join(pytorch_dump_folder_path , VOCAB_FILES_NAMES["merges_file"] )
    shutil.copyfile(bpecodes_file , merges_file )

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path , "config.json" )
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1E-1_2,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f'Generating {biogpt_model_config_file}' )
    with open(biogpt_model_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(model_conf , ensure_ascii=False , indent=json_indent ) )

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path , TOKENIZER_CONFIG_FILE )
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f'Generating {biogpt_tokenizer_config_file}' )
    with open(biogpt_tokenizer_config_file , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(tokenizer_conf , ensure_ascii=False , indent=json_indent ) )

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k , None )

    layer_names = list(model_state_dict.keys() )
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight" ):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name )
        else:
            model_state_dict[layer_name.replace("decoder" , "biogpt" )] = model_state_dict.pop(layer_name )

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path )
    model_new = BioGptForCausalLM(config )

    # check that it loads ok
    model_new.load_state_dict(model_state_dict )

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    print(f'Generating {pytorch_weights_dump_path}' )
    torch.save(model_state_dict , pytorch_weights_dump_path )

    print("Conversion is done!" )
if __name__ == "__main__":
_lowerCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_lowerCAmelCase : List[str] = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
| 438
| 0
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class lowercase :
lowercase_ : Any =BlenderbotConfig
lowercase_ : int ={}
lowercase_ : Optional[Any] ='''gelu'''
def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=True ,A__=False ,A__=9_9 ,A__=3_2 ,A__=2 ,A__=4 ,A__=3_7 ,A__=0.1 ,A__=0.1 ,A__=2_0 ,A__=2 ,A__=1 ,A__=0 ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_labels
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = eos_token_id
lowercase = pad_token_id
lowercase = bos_token_id
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size)
lowercase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) ,1)
lowercase = tf.concat([input_ids, eos_tensor] ,axis=1)
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = self.config_cls(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,)
lowercase = prepare_blenderbot_inputs_dict(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase)
return config, inputs_dict
def A__ ( self ,A__ ,A__):
lowercase = TFBlenderbotModel(config=__UpperCamelCase).get_decoder()
lowercase = inputs_dict['''input_ids''']
lowercase = input_ids[:1, :]
lowercase = inputs_dict['''attention_mask'''][:1, :]
lowercase = inputs_dict['''head_mask''']
lowercase = 1
# first forward pass
lowercase = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,head_mask=__UpperCamelCase ,use_cache=__UpperCamelCase)
lowercase , lowercase = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
lowercase = ids_tensor((self.batch_size, 3) ,config.vocab_size)
lowercase = tf.cast(ids_tensor((self.batch_size, 3) ,2) ,tf.inta)
        # append to next input_ids and attention_mask
lowercase = tf.concat([input_ids, next_tokens] ,axis=-1)
lowercase = tf.concat([attention_mask, next_attn_mask] ,axis=-1)
lowercase = model(__UpperCamelCase ,attention_mask=__UpperCamelCase)[0]
lowercase = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase)[0]
self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1])
# select random slice
lowercase = int(ids_tensor((1,) ,output_from_past.shape[-1]))
lowercase = output_from_no_past[:, -3:, random_slice_idx]
lowercase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase ,__UpperCamelCase ,rtol=1E-3)
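        # matching random slices confirm that decoding with cached past_key_values
        # reproduces the logits of re-running the full sequence, up to tolerance.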
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , ):
'''simple docstring'''
if attention_mask is None:
lowercase = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
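    # any mask not supplied above defaults to all-ones ("attend/keep everything");
    # the decoder mask always admits the first (start) position regardless of padding.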
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Any =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
lowercase_ : str =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
lowercase_ : Any =(
{
'''conversational''': TFBlenderbotForConditionalGeneration,
'''feature-extraction''': TFBlenderbotModel,
'''summarization''': TFBlenderbotForConditionalGeneration,
'''text2text-generation''': TFBlenderbotForConditionalGeneration,
'''translation''': TFBlenderbotForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase_ : Optional[int] =True
lowercase_ : Dict =False
lowercase_ : int =False
def A__ ( self):
lowercase = TFBlenderbotModelTester(self)
lowercase = ConfigTester(self ,config_class=__UpperCamelCase)
def A__ ( self):
self.config_tester.run_common_tests()
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase)
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
lowercase_ : List[Any] =['''My friends are cool but they eat too many carbs.''']
lowercase_ : Optional[int] ='''facebook/blenderbot-400M-distill'''
@cached_property
def A__ ( self):
return BlenderbotTokenizer.from_pretrained(self.model_name)
@cached_property
def A__ ( self):
lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
@slow
def A__ ( self):
lowercase = self.tokenizer(self.src_text ,return_tensors='''tf''')
lowercase = self.model.generate(
model_inputs.input_ids ,)
lowercase = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=__UpperCamelCase)[0]
assert (
generated_words
== " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
)
| 715
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,*A__ ,A__=None ,A__=None ,**A__):
super().__init__(*A__ ,**A__)
lowercase = eval_examples
lowercase = post_process_function
def A__ ( self ,A__ = None ,A__=None ,A__ = None ,A__ = "eval" ,**A__ ,):
lowercase = gen_kwargs.copy()
lowercase = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''') is not None else self.args.generation_max_length
)
lowercase = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''') is not None else self.args.generation_num_beams
)
lowercase = gen_kwargs
lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase = self.get_eval_dataloader(A__)
lowercase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = time.time()
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase = eval_loop(
A__ ,description='''Evaluation''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase = self.post_process_function(A__ ,A__ ,A__)
lowercase = self.compute_metrics(A__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
lowercase = metrics.pop(A__)
metrics.update(output.metrics)
else:
lowercase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A__)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
lowercase = self.callback_handler.on_evaluate(self.args ,self.state ,self.control ,A__)
return metrics
def A__ ( self ,A__ ,A__ ,A__=None ,A__ = "test" ,**A__):
lowercase = gen_kwargs.copy()
lowercase = self.get_test_dataloader(A__)
# Temporarily disable metric computation, we will do it in the loop here.
lowercase = self.compute_metrics
lowercase = None
lowercase = time.time()
lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase = eval_loop(
A__ ,description='''Prediction''' ,prediction_loss_only=True if compute_metrics is None else None ,ignore_keys=A__ ,metric_key_prefix=A__ ,)
finally:
lowercase = compute_metrics
lowercase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
A__ ,A__ ,num_samples=output.num_samples ,num_steps=math.ceil(output.num_samples / total_batch_size) ,))
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase = self.post_process_function(A__ ,A__ ,A__ ,'''predict''')
lowercase = self.compute_metrics(A__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f'{metric_key_prefix}_'):
lowercase = metrics.pop(A__)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions ,label_ids=predictions.label_ids ,metrics=A__)
| 633
| 0
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
_UpperCamelCase : Union[str, Any] = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
_UpperCamelCase : Tuple = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
_UpperCamelCase : Dict = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
_UpperCamelCase : List[Any] = sorted(arg_to_scheduler.keys())
_UpperCamelCase : str = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class snake_case__ ( pl.LightningModule):
def __init__( self : Tuple , _A : List[str] , _A : Optional[int]=None , _A : List[Any]="base" , _A : List[Any]=None , _A : List[str]=None , _A : Any=None , **_A : Dict , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(snake_case__ )
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : int = Path(self.hparams.output_dir )
UpperCAmelCase_ : Dict = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
UpperCAmelCase_ : Union[str, Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=snake_case__ , **snake_case__ , )
else:
UpperCAmelCase_ : PretrainedConfig = config
UpperCAmelCase_ : Union[str, Any] = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , snake_case__ , snake_case__ ):
assert hasattr(self.config , snake_case__ ), F"model config doesn\'t have a `{p}` attribute"
setattr(self.config , snake_case__ , getattr(self.hparams , snake_case__ ) )
if tokenizer is None:
UpperCAmelCase_ : str = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=snake_case__ , )
else:
UpperCAmelCase_ : PreTrainedTokenizer = tokenizer
UpperCAmelCase_ : List[Any] = MODEL_MODES[mode]
if model is None:
UpperCAmelCase_ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=snake_case__ , )
else:
UpperCAmelCase_ : Dict = model
def A ( self : Tuple , *_A : Union[str, Any] , **_A : Tuple ) -> int:
UpperCAmelCase_ : List[str] = self.model_type.from_pretrained(*snake_case__ , **snake_case__ )
def A ( self : Dict ) -> List[Any]:
UpperCAmelCase_ : Union[str, Any] = arg_to_scheduler[self.hparams.lr_scheduler]
UpperCAmelCase_ : List[str] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
UpperCAmelCase_ : Tuple = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = self.model
UpperCAmelCase_ : Any = ['bias', 'LayerNorm.weight']
UpperCAmelCase_ : Optional[Any] = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
UpperCAmelCase_ : List[Any] = Adafactor(
snake_case__ , lr=self.hparams.learning_rate , scale_parameter=snake_case__ , relative_step=snake_case__ )
else:
UpperCAmelCase_ : str = AdamW(
snake_case__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
UpperCAmelCase_ : str = optimizer
UpperCAmelCase_ : Optional[Any] = self.get_lr_scheduler()
return [optimizer], [scheduler]
def A ( self : str , _A : Optional[Any] , _A : Optional[int] ) -> Optional[int]:
return self.validation_step(snake_case__ , snake_case__ )
def A ( self : Union[str, Any] , _A : Tuple ) -> Optional[Any]:
return self.validation_end(snake_case__ )
def A ( self : Union[str, Any] ) -> int:
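        # effective batch = per-device batch size * grad-accumulation steps * device count;
        # total optimizer steps = (dataset_size / effective_batch) * max_epochs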
UpperCAmelCase_ : Optional[Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
UpperCAmelCase_ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def A ( self : Optional[Any] , _A : Any ) -> List[Any]:
if stage == "test":
UpperCAmelCase_ : Optional[Any] = len(self.test_dataloader().dataset )
else:
UpperCAmelCase_ : Any = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=snake_case__ )
UpperCAmelCase_ : Optional[Any] = len(self.train_dataloader().dataset )
def A ( self : List[str] , _A : Union[str, Any] , _A : Dict , _A : Union[str, Any] = False ) -> Union[str, Any]:
raise NotImplementedError('''You must implement this for your task''' )
def A ( self : int ) -> Optional[Any]:
return self.train_loader
def A ( self : int ) -> Tuple:
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=snake_case__ )
def A ( self : Optional[Any] ) -> Optional[Any]:
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=snake_case__ )
def A ( self : str , _A : Dict ) -> Optional[Any]:
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
snake_case__ , list(filter(snake_case__ , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def A ( self : Optional[int] , _A : List[str] ) -> Dict:
UpperCAmelCase_ : int = self.output_dir.joinpath('''best_tfmr''' )
UpperCAmelCase_ : Optional[Any] = self.step_count
self.model.save_pretrained(snake_case__ )
self.tokenizer.save_pretrained(snake_case__ )
@staticmethod
def A ( _A : List[str] , _A : Any ) -> Dict:
parser.add_argument(
'''--model_name_or_path''' , default=snake_case__ , type=snake_case__ , required=snake_case__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=snake_case__ , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=snake_case__ , type=snake_case__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(snake_case__ ).parent / '''test_run''' / '''cache''' ) , type=snake_case__ , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=snake_case__ , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=snake_case__ , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=snake_case__ , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=snake_case__ , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5e-5 , type=snake_case__ , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=snake_case__ , metavar=snake_case__ , type=snake_case__ , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=snake_case__ , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=snake_case__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=snake_case__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=snake_case__ , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=snake_case__ )
parser.add_argument('''--train_batch_size''' , default=32 , type=snake_case__ )
parser.add_argument('''--eval_batch_size''' , default=32 , type=snake_case__ )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class snake_case__ ( pl.Callback):
def A ( self : int , _A : Union[str, Any] , _A : Optional[int] ) -> Tuple:
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class snake_case__ ( pl.Callback):
def A ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Dict:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(snake_case__ )
class snake_case__ ( pl.Callback):
def A ( self : Optional[Any] , _A : Tuple , _A : List[str] ) -> Optional[Any]:
UpperCAmelCase_ : Dict = trainer.lr_schedulers[0]['scheduler']
UpperCAmelCase_ : Union[str, Any] = {F"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(snake_case__ )
def A ( self : List[str] , _A : Tuple , _A : Tuple ) -> List[Any]:
rank_zero_info('''***** Validation results *****''' )
UpperCAmelCase_ : Optional[Any] = trainer.callback_metrics
# Log results
for key in sorted(snake_case__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(snake_case__ , str(metrics[key] ) ) )
def A ( self : List[Any] , _A : Optional[Any] , _A : Dict ) -> int:
rank_zero_info('''***** Test results *****''' )
UpperCAmelCase_ : Dict = trainer.callback_metrics
# Log and save results to file
UpperCAmelCase_ : Tuple = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(snake_case__ , '''w''' ) as writer:
for key in sorted(snake_case__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(snake_case__ , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(snake_case__ , str(metrics[key] ) ) )
def __UpperCAmelCase ( A : List[str] , A : List[str] ) -> None:
parser.add_argument(
'''--output_dir''' , default=str(Path(lowerCamelCase_ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCamelCase_ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCamelCase_ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCamelCase_ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCamelCase_ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCamelCase_ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=lowerCamelCase_ , default=4_2 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(lowerCamelCase_ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCamelCase_ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def __UpperCAmelCase ( A : BaseTransformer , A : argparse.Namespace , A : Optional[Any]=None , A : str=True , A : Dict=[] , A : Any=None , A : Optional[Any]=None , **A : List[Any] , ) -> List[str]:
pl.seed_everything(args.seed )
# init model
UpperCAmelCase_ : List[str] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCamelCase_ )
# add custom checkpoints
if checkpoint_callback is None:
UpperCAmelCase_ : Dict = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCamelCase_ )
if logging_callback is None:
UpperCAmelCase_ : str = LoggingCallback()
UpperCAmelCase_ : Union[str, Any] = {}
if args.fpaa:
UpperCAmelCase_ : Optional[Any] = 1_6
if args.gpus > 1:
UpperCAmelCase_ : Union[str, Any] = 'auto'
UpperCAmelCase_ : Optional[Any] = 'ddp'
UpperCAmelCase_ : Dict = args.accumulate_grad_batches
UpperCAmelCase_ : Optional[Any] = None
UpperCAmelCase_ : Union[str, Any] = 'auto'
UpperCAmelCase_ : Union[str, Any] = pl.Trainer.from_argparse_args(
lowerCamelCase_ , weights_summary=lowerCamelCase_ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCamelCase_ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCamelCase_ , )
if args.do_train:
trainer.fit(lowerCamelCase_ )
else:
        print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
| 541
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
UpperCamelCase__ : Dict = logging.get_logger(__name__)
class lowerCAmelCase_ ( lowerCamelCase_ ):
def __init__( self ,*snake_case__ ,**snake_case__ ):
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' ,snake_case__ ,)
super().__init__(*snake_case__ ,**snake_case__ )
| 105
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
lowerCamelCase__ : List[Any] =LongformerTokenizer
lowerCamelCase__ : Union[str, Any] =True
lowerCamelCase__ : Optional[Any] =LongformerTokenizerFast
lowerCamelCase__ : Union[str, Any] =True
def lowercase ( self ) -> List[str]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__magic_name__ : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__magic_name__ : str = dict(zip(UpperCAmelCase__ , range(len(UpperCAmelCase__ ) ) ) )
__magic_name__ : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__magic_name__ : List[Any] = {'''unk_token''': '''<unk>'''}
__magic_name__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__magic_name__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(UpperCAmelCase__ ) )
def lowercase ( self , **lowerCamelCase ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowercase ( self , **lowerCamelCase ) -> str:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase__ )
def lowercase ( self , lowerCamelCase ) -> Dict:
"""simple docstring"""
__magic_name__ : List[str] = '''lower newer'''
__magic_name__ : Optional[int] = '''lower newer'''
return input_text, output_text
def lowercase ( self ) -> Dict:
"""simple docstring"""
__magic_name__ : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
__magic_name__ : Tuple = '''lower newer'''
__magic_name__ : List[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__magic_name__ : List[Any] = tokenizer.tokenize(UpperCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__magic_name__ : Optional[int] = tokens + [tokenizer.unk_token]
__magic_name__ : Union[str, Any] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , UpperCAmelCase__ )
def lowercase ( self ) -> int:
"""simple docstring"""
__magic_name__ : Tuple = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=UpperCAmelCase__ ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=UpperCAmelCase__ ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
__magic_name__ : Any = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
__magic_name__ : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCAmelCase__ )
__magic_name__ : str = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ )
__magic_name__ : str = tokenizer.encode(
'''sequence builders''' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
__magic_name__ : Union[str, Any] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
__magic_name__ : int = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase ( self ) -> str:
"""simple docstring"""
__magic_name__ : List[str] = self.get_tokenizer()
__magic_name__ : Optional[int] = '''Encode this sequence.'''
__magic_name__ : int = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
__magic_name__ : Dict = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
__magic_name__ : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__magic_name__ : Any = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ )
__magic_name__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
__magic_name__ : Optional[Any] = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
__magic_name__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
# Testing spaces after special tokens
__magic_name__ : Any = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ )} ) # mask token has a left space
__magic_name__ : Any = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ )
__magic_name__ : Dict = '''Encode <mask> sequence'''
__magic_name__ : str = '''Encode <mask>sequence'''
__magic_name__ : int = tokenizer.encode(UpperCAmelCase__ )
__magic_name__ : List[Any] = encoded.index(UpperCAmelCase__ )
__magic_name__ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
__magic_name__ : str = tokenizer.encode(UpperCAmelCase__ )
__magic_name__ : Dict = encoded.index(UpperCAmelCase__ )
__magic_name__ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(UpperCAmelCase__ , UpperCAmelCase__ )
def lowercase ( self ) -> Dict:
"""simple docstring"""
pass
def lowercase ( self ) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ : Any = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
__magic_name__ : Union[str, Any] = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
__magic_name__ : List[Any] = '''A, <mask> AllenNLP sentence.'''
__magic_name__ : Any = tokenizer_r.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
__magic_name__ : Dict = tokenizer_p.encode_plus(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
__magic_name__ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
__magic_name__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
        # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
UpperCAmelCase__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowercase ( self ) -> Union[str, Any]:
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
__magic_name__ : List[str] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
__magic_name__ : str = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , UpperCAmelCase__ )
self.assertEqual(post_processor_state['''add_prefix_space'''] , UpperCAmelCase__ )
self.assertEqual(post_processor_state['''trim_offsets'''] , UpperCAmelCase__ )
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__magic_name__ : Dict = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
__magic_name__ : Optional[Any] = F'''{text_of_1_token} {text_of_1_token}'''
__magic_name__ : Any = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : Dict = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
__magic_name__ : Tuple = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : str = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ) + 1, len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
__magic_name__ : List[str] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : str = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
__magic_name__ : int = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : Tuple = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(UpperCAmelCase__ ), len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
__magic_name__ : Any = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
__magic_name__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : int = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ) + 1, 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
__magic_name__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : Dict = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
__magic_name__ : int = self.rust_tokenizer_class.from_pretrained(
UpperCAmelCase__ , use_fast=UpperCAmelCase__ , add_prefix_space=UpperCAmelCase__ , trim_offsets=UpperCAmelCase__ )
__magic_name__ : Dict = tokenizer_r(UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(UpperCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(UpperCAmelCase__ ), 1 + len(UpperCAmelCase__ ) + 1 + len(UpperCAmelCase__ )) , )
| 718
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class A__ ( unittest.TestCase ):
lowerCamelCase__ : Union[str, Any] =MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowerCamelCase__ : Union[str, Any] =TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def lowercase ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> str:
"""simple docstring"""
__magic_name__ : Optional[Any] = AudioClassificationPipeline(model=lowerCamelCase , feature_extractor=lowerCamelCase )
# test with a raw waveform
__magic_name__ : Union[str, Any] = np.zeros((34000,) )
__magic_name__ : Union[str, Any] = np.zeros((14000,) )
return audio_classifier, [audioa, audio]
def lowercase ( self , lowerCamelCase , lowerCamelCase ) -> Any:
"""simple docstring"""
__magic_name__ , __magic_name__ : str = examples
__magic_name__ : Tuple = audio_classifier(lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
lowerCamelCase , [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
] , )
__magic_name__ : Any = audio_classifier(lowerCamelCase , top_k=1 )
self.assertEqual(
lowerCamelCase , [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
] , )
self.run_torchaudio(lowerCamelCase )
@require_torchaudio
def lowercase ( self , lowerCamelCase ) -> Any:
"""simple docstring"""
import datasets
# test with a local file
__magic_name__ : List[Any] = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
__magic_name__ : Tuple = dataset[0]['''audio''']['''array''']
__magic_name__ : Dict = audio_classifier(lowerCamelCase )
self.assertEqual(
lowerCamelCase , [
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
{'''score''': ANY(lowerCamelCase ), '''label''': ANY(lowerCamelCase )},
] , )
@require_torch
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
__magic_name__ : Optional[int] = '''anton-l/wav2vec2-random-tiny-classifier'''
__magic_name__ : int = pipeline('''audio-classification''' , model=lowerCamelCase )
__magic_name__ : Tuple = np.ones((8000,) )
__magic_name__ : Any = audio_classifier(lowerCamelCase , top_k=4 )
__magic_name__ : List[str] = [
{'''score''': 0.0_8_4_2, '''label''': '''no'''},
{'''score''': 0.0_8_3_8, '''label''': '''up'''},
{'''score''': 0.0_8_3_7, '''label''': '''go'''},
{'''score''': 0.0_8_3_4, '''label''': '''right'''},
]
__magic_name__ : Optional[Any] = [
{'''score''': 0.0_8_4_5, '''label''': '''stop'''},
{'''score''': 0.0_8_4_4, '''label''': '''on'''},
{'''score''': 0.0_8_4_1, '''label''': '''right'''},
{'''score''': 0.0_8_3_4, '''label''': '''left'''},
]
self.assertIn(nested_simplify(lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
__magic_name__ : Optional[Any] = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
__magic_name__ : Optional[Any] = audio_classifier(lowerCamelCase , top_k=4 )
self.assertIn(nested_simplify(lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def lowercase ( self ) -> Optional[int]:
"""simple docstring"""
import datasets
__magic_name__ : Optional[Any] = '''superb/wav2vec2-base-superb-ks'''
__magic_name__ : List[Any] = pipeline('''audio-classification''' , model=lowerCamelCase )
__magic_name__ : Tuple = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
__magic_name__ : Optional[Any] = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
__magic_name__ : Optional[int] = audio_classifier(lowerCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(lowerCamelCase , decimals=3 ) , [
{'''score''': 0.9_8_1, '''label''': '''go'''},
{'''score''': 0.0_0_7, '''label''': '''up'''},
{'''score''': 0.0_0_6, '''label''': '''_unknown_'''},
{'''score''': 0.0_0_1, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def lowercase ( self ) -> int:
"""simple docstring"""
pass
| 336
| 0
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations( sequence: list[int | str] ) -> None:
    """simple docstring"""
    create_state_space_tree(sequence , [] , 0 , [0 for i in range(len(sequence ) )] )
def create_state_space_tree( sequence: list[int | str] , current_sequence: list[int | str] , index: int , index_used: list[int] , ) -> None:
    """simple docstring"""
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            index_used[i] = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)
sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
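# --- Editor's sketch (not part of the original file): a minimal cross-check of
# the backtracking above against itertools.permutations. The `_collect` helper
# is hypothetical; it mirrors the state-space walk but gathers results instead
# of printing them.
import itertools
def _collect(seq):
    out = []
    def walk(current, used):
        if len(current) == len(seq):
            out.append(list(current))
            return
        for i in range(len(seq)):
            if not used[i]:
                used[i] = True
                current.append(seq[i])
                walk(current, used)
                current.pop()
                used[i] = False
    walk([], [False] * len(seq))
    return out
assert sorted(_collect([3, 1, 2, 4])) == sorted(list(p) for p in itertools.permutations([3, 1, 2, 4]))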
| 378
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A_ : int = StableDiffusionDiffEditPipeline
A_ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
A_ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
A_ : Dict = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ : Union[str, Any] = frozenset([] )
def _A ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
lowerCAmelCase__ : str = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a__ , set_alpha_to_one=a__ , )
lowerCAmelCase__ : str = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=a__ , set_alpha_to_zero=a__ , )
torch.manual_seed(0 )
lowerCAmelCase__ : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase__ : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCAmelCase__ : Tuple = CLIPTextModel(a__ )
lowerCAmelCase__ : int = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCAmelCase__ : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _A ( self : Dict , a__ : List[Any] , a__ : str=0 ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = floats_tensor((1, 16, 16) , rng=random.Random(a__ ) ).to(a__ )
lowerCAmelCase__ : Optional[int] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(a__ ) ).to(a__ )
if str(a__ ).startswith("mps" ):
lowerCAmelCase__ : Any = torch.manual_seed(a__ )
else:
lowerCAmelCase__ : int = torch.Generator(device=a__ ).manual_seed(a__ )
lowerCAmelCase__ : Optional[Any] = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _A ( self : Union[str, Any] , a__ : Union[str, Any] , a__ : Tuple=0 ):
'''simple docstring'''
lowerCAmelCase__ : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
lowerCAmelCase__ : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Tuple = Image.fromarray(np.uinta(a__ ) ).convert("RGB" )
if str(a__ ).startswith("mps" ):
lowerCAmelCase__ : str = torch.manual_seed(a__ )
else:
lowerCAmelCase__ : List[str] = torch.Generator(device=a__ ).manual_seed(a__ )
lowerCAmelCase__ : Dict = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _A ( self : Tuple , a__ : Any , a__ : Optional[Any]=0 ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
lowerCAmelCase__ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ : Tuple = Image.fromarray(np.uinta(a__ ) ).convert("RGB" )
if str(a__ ).startswith("mps" ):
lowerCAmelCase__ : Dict = torch.manual_seed(a__ )
else:
lowerCAmelCase__ : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
lowerCAmelCase__ : Dict = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def _A ( self : List[str] ):
'''simple docstring'''
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : Optional[Any] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(a__ , a__ , a__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCAmelCase__ : str = self.get_dummy_inputs(a__ )
lowerCAmelCase__ : List[Any] = pipe(**a__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(a__ )
lowerCAmelCase__ : Tuple = self.pipeline_class.from_pretrained(a__ )
pipe_loaded.to(a__ )
pipe_loaded.set_progress_bar_config(disable=a__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(a__ , a__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a__ )
lowerCAmelCase__ : int = pipe_loaded(**a__ )[0]
lowerCAmelCase__ : Dict = np.abs(output - output_loaded ).max()
self.assertLess(a__ , 1e-4 )
def _A ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = "cpu"
lowerCAmelCase__ : Any = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_mask_inputs(a__ )
lowerCAmelCase__ : List[str] = pipe.generate_mask(**a__ )
lowerCAmelCase__ : Optional[Any] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCAmelCase__ : str = np.array([0] * 9 )
lowerCAmelCase__ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = "cpu"
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : List[str] = self.get_dummy_inversion_inputs(a__ )
lowerCAmelCase__ : Optional[int] = pipe.invert(**a__ ).images
lowerCAmelCase__ : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCAmelCase__ : Tuple = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
lowerCAmelCase__ : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def _A ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = "cpu"
lowerCAmelCase__ : Optional[int] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
lowerCAmelCase__ : List[Any] = DPMSolverMultistepScheduler(**a__ )
lowerCAmelCase__ : List[Any] = DPMSolverMultistepInverseScheduler(**a__ )
lowerCAmelCase__ : Any = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Dict = self.get_dummy_inversion_inputs(a__ )
lowerCAmelCase__ : Optional[Any] = pipe.invert(**a__ ).images
lowerCAmelCase__ : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCAmelCase__ : Dict = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
lowerCAmelCase__ : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
@require_torch_gpu
@slow
class lowerCAmelCase ( unittest.TestCase ):
def _A ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _A ( cls : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCAmelCase__ : Dict = raw_image.convert("RGB" ).resize((768, 768) )
lowerCAmelCase__ : str = raw_image
def _A ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = torch.manual_seed(0 )
lowerCAmelCase__ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a__ , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase__ : int = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Optional[Any] = "a bowl of fruit"
lowerCAmelCase__ : Union[str, Any] = "a bowl of pears"
lowerCAmelCase__ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a__ , target_prompt=a__ , generator=a__ , )
lowerCAmelCase__ : str = pipe.invert(
prompt=a__ , image=self.raw_image , inpaint_strength=0.7 , generator=a__ ).latents
lowerCAmelCase__ : Tuple = pipe(
prompt=a__ , mask_image=a__ , image_latents=a__ , generator=a__ , negative_prompt=a__ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCAmelCase__ : List[str] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def _A ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
lowerCAmelCase__ : str = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=a__ , torch_dtype=torch.floataa )
lowerCAmelCase__ : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCAmelCase__ : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=a__ )
lowerCAmelCase__ : Any = "a bowl of fruit"
lowerCAmelCase__ : Optional[int] = "a bowl of pears"
lowerCAmelCase__ : Optional[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=a__ , target_prompt=a__ , generator=a__ , )
lowerCAmelCase__ : Optional[int] = pipe.invert(
prompt=a__ , image=self.raw_image , inpaint_strength=0.7 , generator=a__ , num_inference_steps=25 , ).latents
lowerCAmelCase__ : Any = pipe(
prompt=a__ , mask_image=a__ , image_latents=a__ , generator=a__ , negative_prompt=a__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCAmelCase__ : Optional[int] = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 378
| 1
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __UpperCamelCase ( ProcessorMixin ):
'''simple docstring'''
__a : Union[str, Any] =["""image_processor""", """tokenizer"""]
__a : List[str] ="""OwlViTImageProcessor"""
__a : List[Any] =("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ ):
lowerCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase_ , )
lowerCAmelCase = kwargs.pop('''feature_extractor''' )
lowerCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
def __call__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="max_length" , UpperCAmelCase_="np" , **UpperCAmelCase_ ):
if text is None and query_images is None and images is None:
raise ValueError(
'''You have to specify at least one text or query image or image. All three cannot be none.''' )
if text is not None:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or (isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and not isinstance(text[0] , UpperCAmelCase_ )):
lowerCAmelCase = [self.tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )]
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(text[0] , UpperCAmelCase_ ):
lowerCAmelCase = []
# Maximum number of queries across batch
lowerCAmelCase = max([len(UpperCAmelCase_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCAmelCase_ ) != max_num_queries:
lowerCAmelCase = t + [''' '''] * (max_num_queries - len(UpperCAmelCase_ ))
lowerCAmelCase = self.tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
encodings.append(UpperCAmelCase_ )
else:
raise TypeError('''Input text should be a string, a list of strings or a nested list of strings''' )
if return_tensors == "np":
lowerCAmelCase = np.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = np.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCAmelCase = jnp.concatenate([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = jnp.concatenate([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCAmelCase = torch.cat([encoding['''input_ids'''] for encoding in encodings] , dim=0 )
lowerCAmelCase = torch.cat([encoding['''attention_mask'''] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCAmelCase = tf.stack([encoding['''input_ids'''] for encoding in encodings] , axis=0 )
lowerCAmelCase = tf.stack([encoding['''attention_mask'''] for encoding in encodings] , axis=0 )
else:
raise ValueError('''Target return tensor type could not be returned''' )
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = input_ids
lowerCAmelCase = attention_mask
if query_images is not None:
lowerCAmelCase = BatchEncoding()
lowerCAmelCase = self.image_processor(
UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ ).pixel_values
lowerCAmelCase = query_pixel_values
if images is not None:
lowerCAmelCase = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ )
if text is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.image_processor.post_process(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.image_processor.post_process_object_detection(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.image_processor.post_process_image_guided_detection(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def __snake_case ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def __snake_case ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def __snake_case ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase_ , )
return self.image_processor
| 33
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob( text: str ) -> None:
    single_char_strings , two_char_strings = analyze_text(text )
    my_alphas = list(''' ''' + ascii_lowercase )
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values() )
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob )  # entropy formula.
    # print entropy
    print(F"""{round(-1 * my_fir_sum ):.1f}""" )
    # two len string
    all_sum = sum(two_char_strings.values() )
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str ) / all_sum
                my_sec_sum += prob * math.log2(prob )
    # print second entropy
    print(F"""{round(-1 * my_sec_sum ):.1f}""" )
    # print the difference between them
    print(F"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" )
def analyze_text( text: str ):
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text ) - 1 ):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main():
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
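# --- Editor's sketch (not part of the original file): a worked instance of the
# first-order Shannon entropy used above, H = -sum(p * log2(p)); a text that is
# half 'a' and half 'b' carries exactly 1.0 bit per character. Reuses the
# `math` import already at the top of this file.
_probs = [0.5, 0.5]
_h = -sum(p * math.log2(p) for p in _probs)
assert abs(_h - 1.0) < 1e-9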
| 33
| 1
|
from torch import nn
def get_activation( act_fn: str ):
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'''Unsupported activation function: {act_fn}''' )
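# --- Editor's sketch (not part of the original file; assumes torch is
# installed): the factory above just maps an activation name to an `nn.Module`.
import torch
_act = get_activation("gelu")  # returns nn.GELU()
assert tuple(_act(torch.zeros(2, 3)).shape) == (2, 3)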
| 6
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    model_type = "encoder-decoder"
    is_composition = True
    def __init__( self , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("""encoder""" )
        encoder_model_type = encoder_config.pop("""model_type""" )
        decoder_config = kwargs.pop("""decoder""" )
        decoder_model_type = decoder_config.pop("""model_type""" )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs ) -> PretrainedConfig:
        """simple docstring"""
        logger.info("""Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        """simple docstring"""
        output = copy.deepcopy(self.__dict__ )
        output["""encoder"""] = self.encoder.to_dict()
        output["""decoder"""] = self.decoder.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
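# --- Editor's sketch (not part of the original file): hypothetical round-trip.
# It can only run inside a transformers checkout, since AutoConfig is imported
# relatively above, so it is left as a comment:
# from transformers import BertConfig, EncoderDecoderConfig
# cfg = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
# assert cfg.is_encoder_decoder and cfg.decoder.add_cross_attention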
| 6
| 1
|
'''simple docstring'''
from __future__ import annotations
def all_construct( target: str , word_bank: list[str] | None = None ) ->list[list[str]]:
    word_bank = word_bank or []
    # create a table
    table_size = len(target ) + 1
    table: list = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size ):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word )] == word:
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now,push that combination to the table[i+len(word)]
                    table[i + len(word )] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
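# --- Editor's sketch (not part of the original file): smallest interesting
# case of the table DP above — both decompositions of "ab" are recovered.
assert sorted(all_construct("ab", ["a", "b", "ab"])) == [["a", "b"], ["ab"]]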
| 603
|
'''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ) ->bool:
lowercase_ = set()
# Replace all the whitespace in our sentence
lowercase_ = input_str.replace(""" """ , """""" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(SCREAMING_SNAKE_CASE_ ) == 26
def A_ ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ) ->bool:
lowercase_ = [False] * 26
for char in input_str:
if char.islower():
lowercase_ = True
elif char.isupper():
lowercase_ = True
return all(SCREAMING_SNAKE_CASE_ )
def A_ ( SCREAMING_SNAKE_CASE_ = "The quick brown fox jumps over the lazy dog" , ) ->bool:
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def A_ ( ) ->None:
from timeit import timeit
lowercase_ = """from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"""
print(timeit("""is_pangram()""" , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit("""is_pangram_faster()""" , setup=SCREAMING_SNAKE_CASE_ ) )
print(timeit("""is_pangram_fastest()""" , setup=SCREAMING_SNAKE_CASE_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
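# --- Editor's sketch (not part of the original file): the three variants agree
# on the default pangram and on a short non-pangram.
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram("hello world") and not is_pangram_fastest("hello world")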
| 603
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_blenderbot_small_fast'''] = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_blenderbot_small'''] = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_blenderbot_small'''] = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_blenderbot_small'''] = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119
|
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution( limit : int = 1_50_00_00 ):
    '''simple docstring'''
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2 ):
            if gcd(euclid_m, euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
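# --- Editor's sketch (not part of the original file): cheap sanity check — at
# limit 12 only the (3, 4, 5) right triangle fits, so exactly one perimeter
# admits a single integer-sided solution.
assert solution(12) == 1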
| 119
| 1
|
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase__ : str = logging.get_logger(__name__)
lowerCamelCase__ : Optional[Any] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowercase__( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = """mctct"""
def __init__( self :str , lowerCamelCase_ :Dict=80_65 , lowerCamelCase_ :Optional[Any]=15_36 , lowerCamelCase_ :int=36 , lowerCamelCase_ :List[str]=61_44 , lowerCamelCase_ :Optional[int]=4 , lowerCamelCase_ :Tuple=3_84 , lowerCamelCase_ :str=9_20 , lowerCamelCase_ :List[str]=1E-5 , lowerCamelCase_ :Optional[int]=0.3 , lowerCamelCase_ :str="relu" , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :str=0.3 , lowerCamelCase_ :Union[str, Any]=0.3 , lowerCamelCase_ :Optional[Any]=1 , lowerCamelCase_ :Union[str, Any]=0 , lowerCamelCase_ :str=2 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Union[str, Any]=0.3 , lowerCamelCase_ :List[Any]=1 , lowerCamelCase_ :Optional[int]=(7,) , lowerCamelCase_ :List[str]=(3,) , lowerCamelCase_ :Dict=80 , lowerCamelCase_ :Union[str, Any]=1 , lowerCamelCase_ :Any=None , lowerCamelCase_ :str="sum" , lowerCamelCase_ :List[Any]=False , **lowerCamelCase_ :Tuple , ) -> Any:
'''simple docstring'''
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Tuple = hidden_size
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Optional[Any] = attention_head_dim
SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps
SCREAMING_SNAKE_CASE : str = layerdrop
SCREAMING_SNAKE_CASE : List[str] = hidden_act
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Tuple = pad_token_id
SCREAMING_SNAKE_CASE : Tuple = bos_token_id
SCREAMING_SNAKE_CASE : List[str] = eos_token_id
SCREAMING_SNAKE_CASE : int = conv_glu_dim
SCREAMING_SNAKE_CASE : str = conv_dropout
SCREAMING_SNAKE_CASE : Tuple = num_conv_layers
SCREAMING_SNAKE_CASE : int = input_feat_per_channel
SCREAMING_SNAKE_CASE : Optional[Any] = input_channels
SCREAMING_SNAKE_CASE : Any = conv_channels
SCREAMING_SNAKE_CASE : Tuple = ctc_loss_reduction
SCREAMING_SNAKE_CASE : Any = ctc_zero_infinity
# prevents config testing fail with exporting to json
SCREAMING_SNAKE_CASE : Dict = list(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[int] = list(lowerCamelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel)` == `config.num_conv_layers` '''
f"but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, "
f"`config.num_conv_layers = {self.num_conv_layers}`." )
| 713
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
lowerCamelCase__ : Dict = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
"microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class lowercase__( PretrainedConfig ):
'''simple docstring'''
UpperCamelCase = """layoutlmv3"""
def __init__( self :str , lowerCamelCase_ :Optional[Any]=5_02_65 , lowerCamelCase_ :Dict=7_68 , lowerCamelCase_ :Union[str, Any]=12 , lowerCamelCase_ :Optional[Any]=12 , lowerCamelCase_ :Union[str, Any]=30_72 , lowerCamelCase_ :Any="gelu" , lowerCamelCase_ :Union[str, Any]=0.1 , lowerCamelCase_ :str=0.1 , lowerCamelCase_ :Any=5_12 , lowerCamelCase_ :int=2 , lowerCamelCase_ :Optional[Any]=0.0_2 , lowerCamelCase_ :Optional[int]=1E-5 , lowerCamelCase_ :Dict=1 , lowerCamelCase_ :int=0 , lowerCamelCase_ :Tuple=2 , lowerCamelCase_ :List[str]=10_24 , lowerCamelCase_ :Tuple=1_28 , lowerCamelCase_ :Any=1_28 , lowerCamelCase_ :Optional[Any]=True , lowerCamelCase_ :str=32 , lowerCamelCase_ :int=1_28 , lowerCamelCase_ :int=64 , lowerCamelCase_ :List[Any]=2_56 , lowerCamelCase_ :Any=True , lowerCamelCase_ :str=True , lowerCamelCase_ :Union[str, Any]=True , lowerCamelCase_ :List[str]=2_24 , lowerCamelCase_ :Dict=3 , lowerCamelCase_ :Union[str, Any]=16 , lowerCamelCase_ :Any=None , **lowerCamelCase_ :Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(
vocab_size=lowerCamelCase_ , hidden_size=lowerCamelCase_ , num_hidden_layers=lowerCamelCase_ , num_attention_heads=lowerCamelCase_ , intermediate_size=lowerCamelCase_ , hidden_act=lowerCamelCase_ , hidden_dropout_prob=lowerCamelCase_ , attention_probs_dropout_prob=lowerCamelCase_ , max_position_embeddings=lowerCamelCase_ , type_vocab_size=lowerCamelCase_ , initializer_range=lowerCamelCase_ , layer_norm_eps=lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ , **lowerCamelCase_ , )
SCREAMING_SNAKE_CASE : Optional[Any] = max_ad_position_embeddings
SCREAMING_SNAKE_CASE : List[Any] = coordinate_size
SCREAMING_SNAKE_CASE : Tuple = shape_size
SCREAMING_SNAKE_CASE : Optional[int] = has_relative_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_pos_bins
SCREAMING_SNAKE_CASE : int = max_rel_pos
SCREAMING_SNAKE_CASE : Any = has_spatial_attention_bias
SCREAMING_SNAKE_CASE : List[Any] = rel_ad_pos_bins
SCREAMING_SNAKE_CASE : Dict = max_rel_ad_pos
SCREAMING_SNAKE_CASE : Optional[int] = text_embed
SCREAMING_SNAKE_CASE : Any = visual_embed
SCREAMING_SNAKE_CASE : Any = input_size
SCREAMING_SNAKE_CASE : Tuple = num_channels
SCREAMING_SNAKE_CASE : List[str] = patch_size
SCREAMING_SNAKE_CASE : str = classifier_dropout
class lowercase__( OnnxConfig ):
'''simple docstring'''
UpperCamelCase = version.parse("""1.12""" )
@property
def __lowerCAmelCase ( self :List[Any] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def __lowerCAmelCase ( self :Optional[int] ) -> float:
'''simple docstring'''
return 1E-5
@property
def __lowerCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
return 12
def __lowerCAmelCase ( self :List[Any] , lowerCamelCase_ :"ProcessorMixin" , lowerCamelCase_ :int = -1 , lowerCamelCase_ :int = -1 , lowerCamelCase_ :bool = False , lowerCamelCase_ :Optional["TensorType"] = None , lowerCamelCase_ :int = 3 , lowerCamelCase_ :int = 40 , lowerCamelCase_ :int = 40 , ) -> Mapping[str, Any]:
'''simple docstring'''
setattr(processor.image_processor , '''apply_ocr''' , lowerCamelCase_ )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Dict = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : Union[str, Any] = processor.tokenizer.num_special_tokens_to_add(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = compute_effective_axis_dimension(
lowerCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase_ )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : Union[str, Any] = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
SCREAMING_SNAKE_CASE : int = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_images(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : int = dict(
processor(
lowerCamelCase_ , text=lowerCamelCase_ , boxes=lowerCamelCase_ , return_tensors=lowerCamelCase_ , ) )
return inputs
| 18
| 0
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))

class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image

@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
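
# Quick, self-contained check of the stage-size arithmetic exercised by
# create_and_check_model above: each Cvt stage computes
# floor((size + 2 * padding - kernel) / stride) + 1. With the tester defaults
# (kernel 7, stride 4, padding 2), a 64x64 input shrinks to 16x16:
assert floor((64 + 2 * 2 - 7) / 4) + 1 == 16
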
'''simple docstring'''
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends

if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)
    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts
    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")
        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []
        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue
                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue
                all_doc_strings.append(text_in_this_tag)
                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)
        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")
        return all_doc_strings, string2xtag_seq, string2xsubs_seq
    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath
    def __call__(self, html_strings) -> BatchFeature:
        # Input type checking for clearer error
        valid_strings = False
        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True
        if not valid_strings:
            raise ValueError(
                "HTML strings must be of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )
        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))
        if not is_batched:
            html_strings = [html_strings]
        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)
        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)
        return encoded_inputs
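
# Standalone illustration of the tag/subscript -> XPath mapping implemented by
# construct_xpath above (the sample tags and subscripts are made up):
def _build_xpath(xpath_tags, xpath_subscripts):
    xpath = ""
    for tagname, subs in zip(xpath_tags, xpath_subscripts):
        xpath += f"/{tagname}"
        if subs != 0:
            xpath += f"[{subs}]"
    return xpath

# subscript 0 means "only such child"; 2 means "second sibling with that tag"
assert _build_xpath(["html", "body", "div", "p"], [0, 0, 0, 2]) == "/html/body/div/p[2]"
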
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
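
# Sketch of the "*" substitution performed in recursively_load_weights: the
# layer index parsed out of the fairseq parameter name is spliced into the
# mapped HF key (the sample name below is illustrative, not a real checkpoint key).
_name = "encoder.layers.11.self_attn.k_proj.weight"
_key = "self_attn.k_proj"
_mapped = "encoder.layers.*.attention.k_proj"
_layer_index = _name.split(_key)[0].split(".")[-2]  # -> "11"
assert _mapped.replace("*", _layer_index) == "encoder.layers.11.attention.k_proj"
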
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3

@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
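
# Determinism sketch: the tests above seed a CPU torch.Generator so pipeline
# outputs are bit-reproducible across runs; the same mechanism in miniature:
_g1 = torch.Generator(device="cpu").manual_seed(0)
_g2 = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(2, 2, generator=_g1), torch.randn(2, 2, generator=_g2))
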
from __future__ import annotations


def simple_interest(
    principal: float, daily_interest_rate: float, days_between_payments: float
) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
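
# Hand-computed sanity checks for the helpers above (inputs are illustrative):
assert abs(simple_interest(18000.0, 0.06, 3) - 3240.0) < 1e-6       # 18000 * 0.06 * 3
assert round(compound_interest(10000.0, 0.05, 3), 2) == 1576.25     # 10000 * (1.05**3 - 1)
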
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with the `datasets` package and save it
    to the {split}.source / {split}.target layout."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")

if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
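
# Hypothetical invocation (python-fire maps --flags onto the function's
# keyword arguments; the script filename here is assumed):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16
#
# This writes {split}.source / {split}.target pairs (with "validation"
# shortened to "val") under ./wmt16-ro-en/ unless --save_dir is given.
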
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)

# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
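
# Sketch of the f-string escaping trick the model-card template relies on:
# doubled braces survive as literal BibTeX braces while single-brace fields
# interpolate (the snippet below is illustrative, not part of the template).
_pair = "en-ru"
_snippet = f"@inproceedings{{demo, pair={_pair}}}"
assert _snippet == "@inproceedings{demo, pair=en-ru}"
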
from __future__ import annotations
END = "#"


class Trie:
    """A character trie that supports prefix-based autocompletion."""

    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            # END marks a complete word; otherwise recurse into the subtree
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
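
# Expected behaviour of the autocompleter above: dict iteration follows
# insertion order on Python 3.7+, and the trailing space comes from END -> " ".
_expected = ("depart ", "detergent ", "deer ", "deal ")
assert autocomplete_using_trie("de") == _expected
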
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)

WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )

class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    def generate_dummy_inputs(self, preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
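
# Minimal usage sketch (assumes the transformers package is installed):
# the attribute_map aliases let generic code read hidden_size and
# num_attention_heads even though Whisper natively stores d_model and
# encoder_attention_heads.
#
#   from transformers import WhisperConfig
#   config = WhisperConfig(d_model=256, encoder_attention_heads=4)
#   assert config.hidden_size == 256
#   assert config.num_attention_heads == 4
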
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
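
# Worked check: with densities that sum to one (flat universe) the curvature
# term vanishes and E(0) = 1, so the function returns H0 itself at z = 0.
_h = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3 - 1e-4,
    redshift=0,
)
assert abs(_h - 68.3) < 1e-9
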
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    """Construct a T5 tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, legacy=True, **kwargs) -> None:
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs) -> List[str]:
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
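
# Sentinel-token arithmetic used by _convert_token_to_id above: <extra_id_k>
# maps to vocab_size - k - 1, putting the 100 extra ids at the very top of the
# vocabulary. The 32100 figure below assumes the standard 32k T5 vocab plus
# 100 extra ids; it is illustrative only.
_vocab_size = 32100
assert _vocab_size - 0 - 1 == 32099   # <extra_id_0>
assert _vocab_size - 99 - 1 == 32000  # <extra_id_99>
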
def solution(length: int = 50) -> int:
    """Count the ways to fill a 1 x `length` row with unit squares and tiles
    of length 2, 3 or 4."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
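
# Hand-checked small cases of the recurrence above: a row of length 3 admits
# 4 fillings (three squares, a 2-tile first or last, or a single 3-tile) and a
# row of length 4 admits 8.
assert solution(3) == 4
assert solution(4) == 8
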
if __name__ == "__main__":
print(F"{solution() = }")
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging if TF is imported indirectly
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
try:
import torch
print('''Torch version:''', torch.__version__)
print('''Cuda available:''', torch.cuda.is_available())
print('''Cuda version:''', torch.version.cuda)
print('''CuDNN version:''', torch.backends.cudnn.version())
print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
print('''Torch version:''', None)
try:
import transformers
print('''transformers version:''', transformers.__version__)
except ImportError:
print('''transformers version:''', None)
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate (x_points, y_points) at x0 with Neville's algorithm and
    return [interpolated_value, full_table]."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
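
# Usage sketch: five collinear points (y = x + 5) interpolated at x = 5
# must recover the line exactly.
_value, _ = neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)
assert abs(_value - 10.0) < 1e-9
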
if __name__ == "__main__":
import doctest
doctest.testmod()
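# Illustrative usage (added): interpolating f(x) = x**2 through four points and
# evaluating at x0 = 5 recovers the exact value, since a degree-3 interpolant
# reproduces a quadratic exactly.
if __name__ == "__main__":
    value, _table = neville_interpolate([1, 2, 3, 4], [1, 4, 9, 16], 5)
    print(value)  # 25.0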
| 202
| 0
|
from __future__ import annotations
def peak(lst: list[int]) -> int:
    """
    Return a peak value of `lst`, assumed to strictly increase and then
    strictly decrease, using a divide-and-conquer search.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
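# Example (added for illustration): `peak` assumes the input first strictly
# increases and then strictly decreases.
if __name__ == "__main__":
    print(peak([1, 2, 4, 8, 9, 7, 3]))  # 9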
| 546
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _UpperCamelCase :
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> Optional[int]:
A = TFViTMAEModel(config=a__ )
A = model(a__ , training=a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> int:
A = TFViTMAEForPreTraining(a__ )
A = model(a__ , training=a__ )
# expected sequence length = num_patches
A = (self.image_size // self.patch_size) ** 2
A = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A = 1
A = TFViTMAEForPreTraining(a__ )
A = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A = model(a__ , training=a__ )
A = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _UpperCamelCase ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowerCAmelCase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def _UpperCAmelCase ( self ) -> Dict:
A = TFViTMAEModelTester(self )
A = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def _UpperCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _UpperCAmelCase ( self ) -> Optional[int]:
pass
def _UpperCAmelCase ( self ) -> int:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , tf.keras.layers.Layer ) )
def _UpperCAmelCase ( self ) -> Any:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(a__ )
A = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def _UpperCAmelCase ( self ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _UpperCAmelCase ( self ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
def _UpperCAmelCase ( self ) -> int:
# make the mask reproducible
np.random.seed(2 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = int((config.image_size // config.patch_size) ** 2 )
A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A = model_class(a__ )
A = self._prepare_for_class(a__ , a__ )
A = model(a__ , noise=a__ )
A = copy.deepcopy(self._prepare_for_class(a__ , a__ ) )
A = model(**a__ , noise=a__ )
A = outputs_dict[0].numpy()
A = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def _UpperCAmelCase ( self ) -> Optional[int]:
# make the mask reproducible
np.random.seed(2 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = int((config.image_size // config.patch_size) ** 2 )
A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(a__ ):
A = {}
for k, v in inputs_dict.items():
if tf.is_tensor(a__ ):
A = v.numpy()
else:
A = np.array(a__ )
return inputs_np_dict
for model_class in self.all_model_classes:
A = model_class(a__ )
A = self._prepare_for_class(a__ , a__ )
A = prepare_numpy_arrays(a__ )
A = model(a__ , noise=a__ )
A = model(**a__ , noise=a__ )
self.assert_outputs_same(a__ , a__ )
def _UpperCAmelCase ( self , a__ , a__ , a__ ) -> Dict:
# make masks reproducible
np.random.seed(2 )
A = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A = tf.constant(a__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A = tf_noise
super().check_pt_tf_models(a__ , a__ , a__ )
def _UpperCAmelCase ( self ) -> Tuple:
# make mask reproducible
np.random.seed(2 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(a__ )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(a__ , a__ ),)
if isinstance(a__ , a__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(a__ , """_keras_serializable""" , a__ )
}
A = int((config.image_size // config.patch_size) ** 2 )
A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A = tf.convert_to_tensor(a__ )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
A = main_layer_class(a__ )
A = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
A = tf.keras.Model(a__ , outputs=main_layer(a__ ) )
A = model(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(a__ , """keras_model.h5""" )
model.save(a__ )
A = tf.keras.models.load_model(
a__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(a__ , tf.keras.Model )
A = model(a__ )
self.assert_outputs_same(a__ , a__ )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = int((config.image_size // config.patch_size) ** 2 )
A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A = model_class(a__ )
A = self._prepare_for_class(a__ , a__ )
A = model(a__ , noise=a__ )
if model_class.__name__ == "TFViTMAEModel":
A = outputs.last_hidden_state.numpy()
A = 0
else:
A = outputs.logits.numpy()
A = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ , saved_model=a__ )
A = model_class.from_pretrained(a__ )
A = model(a__ , noise=a__ )
if model_class.__name__ == "TFViTMAEModel":
A = after_outputs["""last_hidden_state"""].numpy()
A = 0
else:
A = after_outputs["""logits"""].numpy()
A = 0
A = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1e-5 )
def _UpperCAmelCase ( self ) -> Dict:
# make mask reproducible
np.random.seed(2 )
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = int((config.image_size // config.patch_size) ** 2 )
A = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
A = model_class(a__ )
A = self._prepare_for_class(a__ , a__ )
A = model(a__ , noise=a__ )
A = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(a__ )
A = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
A = model_class.from_config(model.config )
A = new_model(a__ ) # Build model
new_model.set_weights(model.get_weights() )
A = new_model(a__ , noise=a__ )
self.assert_outputs_same(a__ , a__ )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _UpperCAmelCase ( self ) -> Tuple:
pass
@slow
def _UpperCAmelCase ( self ) -> str:
A = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(a__ )
def _lowerCAmelCase ( ) -> int:
"""simple docstring"""
A = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _UpperCAmelCase ( self ) -> Optional[int]:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self ) -> List[Any]:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
A = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" )
A = self.default_image_processor
A = prepare_img()
A = image_processor(images=a__ , return_tensors="""tf""" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A = ViTMAEConfig()
A = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A = np.random.uniform(size=(1, num_patches) )
# forward pass
A = model(**a__ , noise=a__ )
# verify the logits
A = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , a__ )
A = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , a__ , atol=1e-4 )
| 546
| 1
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    """A minimal unconditional image-generation pipeline used as a local-loading test fixture."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,), "This is a local test"

        return ImagePipelineOutput(images=image), "This is a local test"
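# Usage sketch (added; `unet` and `scheduler` stand for any compatible diffusers
# components and are assumptions for illustration):
#
#     pipeline = CustomLocalPipeline(unet=unet, scheduler=scheduler)
#     output, message = pipeline(batch_size=1, num_inference_steps=50)
#     # `output.images` holds the generated images; `message` is the local-test marker.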
| 93
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def _UpperCAmelCase (UpperCamelCase__ : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_A : List[Any] = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
_A : List[str] = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
_A : int = name.replace("patch_embed" , "" )
if "pos_embed" in name:
_A : int = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
_A : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
_A : Union[str, Any] = name.replace("proj" , "projection" )
if "blocks" in name:
_A : Optional[Any] = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
_A : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_A : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
_A : Optional[int] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
_A : Any = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
_A : int = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
_A : List[Any] = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
_A : Union[str, Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
_A : str = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
_A : Union[str, Any] = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
_A : int = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
_A : Optional[Any] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_A : Any = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
_A : str = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
_A : Any = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
_A : int = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
_A : Optional[int] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
_A : Any = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_A : Optional[int] = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
_A : Tuple = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
_A : Dict = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
_A : Optional[int] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_A : Any = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
_A : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
_A : Union[str, Any] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
_A : Union[str, Any] = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
_A : int = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
_A : Optional[Any] = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
_A : List[str] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
_A : int = name.replace("pretrained" , "dpt" )
if "bn" in name:
_A : Dict = name.replace("bn" , "batch_norm" )
if "head" in name:
_A : int = name.replace("head" , "head.head" )
if "encoder.norm" in name:
_A : List[str] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
_A : Union[str, Any] = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
_A : List[str] = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
_A : Union[str, Any] = name.replace(".." , "." )
if "stem.conv" in name:
_A : List[Any] = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
_A : int = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
_A : Any = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
_A : Dict = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
_A : List[str] = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
_A : int = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
_A : List[Any] = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_A : Tuple = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
_A : List[Any] = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_A : List[Any] = in_proj_weight[: config.hidden_size, :]
_A : List[Any] = in_proj_bias[: config.hidden_size]
_A : Union[str, Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_A : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_A : Tuple = in_proj_weight[
-config.hidden_size :, :
]
_A : Optional[Any] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def _UpperCAmelCase (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
_A , _A : Optional[Any] = get_dpt_config(UpperCamelCase__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
_A : List[str] = torch.load(UpperCamelCase__ , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(UpperCamelCase__ )
# rename keys
for key in state_dict.copy().keys():
_A : Dict = state_dict.pop(UpperCamelCase__ )
_A : Optional[Any] = val
# read in qkv matrices
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
# load HuggingFace model
_A : Union[str, Any] = DPTForSemanticSegmentation(UpperCamelCase__ ) if "ade" in checkpoint_url else DPTForDepthEstimation(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
model.eval()
# Check outputs on an image
_A : Optional[Any] = 480 if "ade" in checkpoint_url else 384
_A : Any = DPTImageProcessor(size=UpperCamelCase__ )
_A : Dict = prepare_img()
_A : Tuple = image_processor(UpperCamelCase__ , return_tensors="pt" )
# forward pass
_A : int = model(**UpperCamelCase__ ).logits if "ade" in checkpoint_url else model(**UpperCamelCase__ ).predicted_depth
if show_prediction:
_A : List[Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=UpperCamelCase__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
lowerCAmelCase__ = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
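# Example invocation (added; the script filename and paths are placeholders,
# while the flags match the parser defined above):
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url <path-or-url-to-dpt-checkpoint> \
#         --pytorch_dump_folder_path ./dpt_converted \
#         --show_prediction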
| 503
| 0
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock2.acquire():
        with pytest.raises(Timeout):
            lock1.acquire(0)
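# Minimal usage sketch (added; the lock path is an arbitrary example): the same
# FileLock API exercised by the tests above, with an explicit acquire timeout.
if __name__ == "__main__":
    demo_lock = FileLock("demo.lock")
    with demo_lock.acquire(timeout=1):
        print("lock held:", demo_lock.is_locked)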
| 116
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
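# Example invocation (added; the script filename and output path are placeholders):
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny-converted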
| 116
| 1
|
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR outputs 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"|       0 |       0 | {nor_gate(0, 0)}      |")
    print(f"|       0 |       1 | {nor_gate(0, 1)}      |")
    print(f"|       1 |       0 | {nor_gate(1, 0)}      |")
    print(f"|       1 |       1 | {nor_gate(1, 1)}      |")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
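# Equivalent formulation (added for illustration): NOR is the negation of OR,
# so the truth table above can also be produced with `int(not (a or b))`.
def nor_gate_via_or(input_1: int, input_2: int) -> int:
    return int(not (input_1 or input_2))


if __name__ == "__main__":
    assert all(nor_gate(a, b) == nor_gate_via_or(a, b) for a in (0, 1) for b in (0, 1))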
| 181
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :Any):
"""simple docstring"""
_lowercase =0
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
_lowercase =AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Union[str, Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =CLIPConfig()
# Create a dummy config file with image_proceesor_type
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_lowercase =AutoImageProcessor.from_pretrained(snake_case).to_dict()
config_dict.pop('image_processor_type')
_lowercase =CLIPImageProcessor(**snake_case)
# save in new folder
model_config.save_pretrained(snake_case)
config.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
# make sure private variable is not incorrectly saved
_lowercase =json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
def UpperCamelCase__ ( self :int):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, 'clip-base is not a local folder and is not a valid model identifier'):
_lowercase =AutoImageProcessor.from_pretrained('clip-base')
def UpperCamelCase__ ( self :Dict):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
_lowercase =AutoImageProcessor.from_pretrained(snake_case, revision='aaaaaa')
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
with self.assertRaisesRegex(
snake_case, 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.', ):
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
with self.assertRaises(snake_case):
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(snake_case):
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case, trust_remote_code=snake_case)
self.assertEqual(reloaded_image_processor.__class__.__name__, 'NewImageProcessor')
def UpperCamelCase__ ( self :Optional[Any]):
"""simple docstring"""
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(snake_case):
AutoImageProcessor.register(snake_case, snake_case)
with tempfile.TemporaryDirectory() as tmpdirname:
_lowercase =Path(snake_case) / 'preprocessor_config.json'
_lowercase =Path(snake_case) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'}, open(snake_case, 'w'), )
json.dump({'model_type': 'clip'}, open(snake_case, 'w'))
_lowercase =CustomImageProcessor.from_pretrained(snake_case)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(snake_case)
_lowercase =AutoImageProcessor.from_pretrained(snake_case)
self.assertIsInstance(snake_case, snake_case)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self :str):
"""simple docstring"""
class SCREAMING_SNAKE_CASE_ ( _a ):
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] =True
try:
AutoConfig.register('custom', snake_case)
AutoImageProcessor.register(snake_case, snake_case)
# If remote code is not set, the default is to use local
_lowercase =AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
_lowercase =AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor', trust_remote_code=snake_case)
self.assertEqual(image_processor.__class__.__name__, 'NewImageProcessor')
self.assertTrue(not hasattr(snake_case, 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 181
| 1
|
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
snake_case__ = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
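# Example invocation (added; the script filename and paths are placeholders):
#
#     python convert_speech_to_text_fairseq_to_tfms.py \
#         --fairseq_path ./s2t_checkpoint.pt \
#         --pytorch_dump_folder_path ./s2t_converted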
| 721
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")
snake_case__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
snake_case__ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
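# Example invocation (added; runner names and the token are placeholders; the
# token needs actions:read permission, as noted in the parser help above):
#
#     python check_self_hosted_runner.py \
#         --target_runners runner-1,runner-2 \
#         --token <github-token>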
| 638
| 0
|
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())
        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_training_new_tokenizer(self):
        pass

    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
| 642
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
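# Round-trip sketch (added; the keyword "secret" is an arbitrary example):
#
#     cipher_map = create_cipher_map("secret")
#     decipher(encipher("Hello World", cipher_map), cipher_map)  # "HELLO WORLD"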
| 518
| 0
|
from math import pi, sqrt
def gamma(num: float) -> float:
    """Recursively compute the gamma function for integers and half-integers."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
print('\nEnter 0 to exit...')
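# Worked checks (added): gamma(n) equals (n - 1)! for positive integers, and
# half-integers chain down to gamma(0.5) = sqrt(pi), e.g.
#
#     gamma(5)   == 24.0               # 4!
#     gamma(1.5) == 0.5 * sqrt(pi)     # (1.5 - 1) * gamma(0.5)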
| 387
|
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
UpperCamelCase = CamembertTokenizer.from_pretrained('camembert-base')
UpperCamelCase = CamembertForMaskedLM.from_pretrained('camembert-base')
model.eval()
UpperCamelCase = 'Le camembert est <mask> :)'
print(fill_mask(masked_input, model, tokenizer, topk=3))
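# Shape of the returned value (added; the filled sentence, score and token shown
# are illustrative, not actual model output):
#
#     [("Le camembert est excellent :)", 0.49, "excellent"), ...]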
| 387
| 1
|
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
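    # Note: DeBERTa reuses GPT-2-style byte-level BPE, so "\u0120" above is the
    # marker for a leading space; that is why " lower" and "lower" map to
    # different token sequences in the fixture vocabulary.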
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
# fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
| 500
|
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
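# Odd-even transposition sort needs at most n passes; each pass only compares
# disjoint neighbour pairs, which is what makes the algorithm attractive on
# parallel hardware even though it is O(n^2) when run sequentially.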
| 500
| 1
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")

        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")

        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")
    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])
    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, 'torch')

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)
    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
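    # Behavior sketch: the generated dummy module lets plain `import` succeed
    # when torch is missing, while any actual use of FakeClass routes through
    # requires_backends and raises with an installation hint.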
| 714
|
'''simple docstring'''
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
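# Illustrative shape walk-through for a checkpoint_version >= 2.0 QKV weight
# (numbers assumed, not taken from a real checkpoint): with num_heads=16,
# hidden_size=64 per head and num_splits=3, a fused tensor of shape [3072, 1024]
# is viewed as [16, 3, 64, 1024], transposed to [3, 16, 64, 1024] and flattened
# back, so the Q, K and V blocks become contiguous along the first axis.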
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
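# Example invocation (checkpoint path is illustrative):
#   PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure /checkpoints/megatron_gpt2_345m.zip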
| 267
| 0
|
"""simple docstring"""
def selection_sort(collection: list) -> list:
    """Sort a mutable collection in ascending order using selection sort."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
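# Example: selection_sort([5, 2, 4, 1]) -> [1, 2, 4, 5]. The algorithm always
# performs O(n^2) comparisons but at most n - 1 swaps, which can make it
# preferable to bubble sort when element writes are expensive.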
| 567
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
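# Hypothetical usage sketch (assumes the Transformers agents/tools runtime, in
# which PipelineTool.__call__ chains encode -> forward -> decode defined above):
#
#   tool = TranslationTool()
#   tool("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")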
| 567
| 1
|
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits

    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)

    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)

    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))

    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.

    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_000)

    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
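# For an n-qubit GHZ state only the all-zeros and all-ones bitstrings survive
# measurement, so quantum_entanglement(3) should report counts close to
# {'000': ~500, '111': ~500} over 1_000 shots (exact numbers vary per run).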
| 702
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
self.assertEqual(
encoding["pixel_values"].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(
encoding["labels"].shape , (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
self.assertEqual(encoding["labels"].dtype , torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
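    # reduce_labels maps the background class (0) to the ignore index 255 and
    # shifts all other class ids down by one, which is why the asserted upper
    # bound widens from 150 to 255 once the flag is enabled.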
| 387
| 0
|
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity coefficient between two collections."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {'''a''', '''b''', '''c''', '''d''', '''e'''}
    set_b = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
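# Worked example for the values above: the intersection is {'c', 'd', 'e'}
# (3 elements) and the union has 8 elements, so the script prints 3 / 8 = 0.375.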
| 36
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset('csv', data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length'
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding='max_length',
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.'
        )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task='text-classification',
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool('.bin' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt')

        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

        results.update(result)

    return results
if __name__ == "__main__":
main()
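# Illustrative launch (script and CSV file names are assumptions; column 0 is
# taken to hold the label):
#   python run_tf_text_classification.py --train_file train.csv --dev_file dev.csv \
#       --label_column_id 0 --model_name_or_path bert-base-cased \
#       --output_dir ./out --do_train --do_eval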
| 329
| 0
|
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)


_CITATION = "\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n    author = {Amit Bagga and Breck Baldwin},\n    title = {Algorithms for Scoring Coreference Chains},\n    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n    year = {1998},\n    pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n    author = {Xiaoqiang Luo},\n    title = {On coreference resolution performance metrics},\n    booktitle = {In Proc. of HLT/EMNLP},\n    year = {2005},\n    pages = {25--32},\n    publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n    title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",\n    author = \"Moosavi, Nafise Sadat and\n      Strube, Michael\",\n    booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",\n    month = aug,\n    year = \"2016\",\n    address = \"Berlin, Germany\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://www.aclweb.org/anthology/P16-1060\",\n    doi = \"10.18653/v1/P16-1060\",\n    pages = \"632--642\",\n}\n\n"

_DESCRIPTION = "\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn  Type    Description\n1   Document ID This is a variation on the document filename\n2   Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3   Word number\n4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5   Part-of-Speech\n6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.\n7   Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"\n8   Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.\n9   Word sense  This is the word sense of the word in Column 3.\n10  Speaker/Author  This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.\n11  Named Entities  These columns identifies the spans representing various named entities.\n12:N    Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN   Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n"

_KWARGS_DESCRIPTION = "\nCalculates coreference evaluation metrics.\nArgs:\n    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n        Each prediction is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n        Each reference is a word with its annotations as a string made of columns joined with spaces.\n        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n        See the details on the format in the description of the metric.\n    keep_singletons: After extracting all mentions of key or system files,\n        mentions whose corresponding coreference chain is of size one,\n        are considered as singletons. The default evaluation mode will include\n        singletons in evaluations if they are included in the key or the system files.\n        By setting 'keep_singletons=False', all singletons in the key and system files\n        will be excluded from the evaluation.\n    NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n        leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.\n    min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.\n        Minimum spans are determined using the MINA algorithm.\n\nReturns:\n    'mentions': mentions\n    'muc': MUC metric [Vilain et al, 1995]\n    'bcub': B-cubed [Bagga and Baldwin, 1998]\n    'ceafe': CEAFe [Luo et al., 2005]\n    'lea': LEA [Moosavi and Strube, 2016]\n    'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n    >>> coval = datasets.load_metric('coval')\n    >>> words = ['bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -',\n    ... 'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)',\n    ... 'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)',\n    ... 'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -',\n    ... 'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)     -',\n    ... 'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *      -']\n    >>> references = [words]\n    >>> predictions = [words]\n    >>> results = coval.compute(predictions=predictions, references=references)\n    >>> print(results) # doctest:+ELLIPSIS\n    {'mentions/recall': 1.0,[...] 'conll_score': 100.0}\n"
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
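# Usage sketch (not part of the original script). It mirrors the doctest in
# _KWARGS_DESCRIPTION above and assumes the `coval` package is installed and
# this file is available to `datasets.load_metric` as the "coval" metric:
#
#     import datasets
#     coval_metric = datasets.load_metric("coval")
#     results = coval_metric.compute(predictions=[words], references=[words])
#     print(results["conll_score"])  # 100.0 when predictions equal references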
| 715
|
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast


@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images built from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
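# These tests are normally collected by pytest; assuming the usual transformers
# repository layout, an invocation along the lines of
#     python -m pytest tests/models/blip_2/test_processor_blip_2.py -q
# will run them (the exact path is an assumption; any pytest run that imports
# this module picks the class up).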
| 345
| 0
|
def equation(x: float) -> float:
    """Function whose root we search for: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Approximate a root of `equation` in [a, b] by repeated interval halving."""
    # No sign change means a root is not guaranteed in [a, b] (Bolzano's theorem)
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
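# For reference (not part of the original file): 10 - x*x has roots at
# +/- sqrt(10) ~= +/- 3.1623, and both intervals above bracket the positive
# root, so each call prints a value near 3.16 (within the 0.01 interval-width
# tolerance used by the loop).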
| 619
|
import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
A : Any ={'input_ids': [[6_34_23, 5, 68_11, 1_49_54, 2_82, 8_16, 38_21, 6_34_66, 6_34_25, 6_34_62, 18, 6_39_78, 6_78, 3_01, 13_20, 6_34_23, 6_34_55, 6_34_58, 18, 6_39_82, 42_46, 39_40, 19_01, 4_77_89, 55_47, 1_89_94], [1_96_30, 11_00, 6_34_46, 13_42, 6_33, 5_44, 44_88, 5_93, 51_02, 24_16, 6_34_95, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [16_52, 4_28, 2_68, 19_36, 5_15, 2_68, 5_85_93, 2_24_13, 91_06, 5_46, 2_68, 3_32_13, 6_39_79, 6_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_51_30, 6_34_50, 9_24, 6_34_49, 22_49, 40_62, 15_58, 3_18, 6_35_04, 2_14_98, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_09, 3_77, 28_27, 25_59, 3_32, 65_75, 6_34_43, 2_68_01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
        expected_encoding = A  # the expected-encoding dict is bound to `A` just above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences
        )
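# Usage sketch (not part of the original tests), grounded in the expectations above:
#     tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
#     tokenizer.encode_fast("This is a test")            # -> [465, 287, 265, 631, 842]
#     tokenizer.decode_fast([465, 287, 265, 631, 842])   # -> "This is a test"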
| 305
| 0
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    """Image processor with resize, center-crop, rescale and BGR channel-flip
    steps (the pretrained checkpoints expect BGR input)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
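# Usage sketch (not part of the original file). With the defaults above, the
# pipeline resizes the shortest edge to 224, center-crops (padding if needed)
# to 256x256, rescales to [0, 1] and flips RGB to BGR:
#     from PIL import Image
#     processor = MobileViTImageProcessor()
#     batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 256, 256])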
| 658
|
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI Jukebox shards, remap their keys and save a JukeboxModel."""
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
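# Usage sketch (not part of the original script):
#     python convert_jukebox.py --model_name jukebox-1b-lyrics \
#         --pytorch_dump_folder_path jukebox-1b-lyrics-converted
# downloads the checkpoint shards listed in MODEL_MAPPING, remaps their keys
# through fix_jukebox_keys/replace_key, loads them into a JukeboxModel, and
# writes the converted model plus a mapping.json audit of the renames.
# (The script filename is an assumption; invoke whatever this file is saved as.)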
| 658
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


# NOTE: the concrete model this processor belongs to is not recoverable from
# this excerpt, so a generic class name is used here.
class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        crop_size: Dict[str, int] = None,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
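# Usage sketch (not part of the original file). With the defaults above a
# single PIL image is resized to 224x224 (bicubic), center-cropped to 224x224,
# rescaled to [0, 1] and normalized with the ImageNet default mean/std:
#     from PIL import Image
#     processor = ImageProcessor()
#     batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt")
#     batch["pixel_values"].shape  # -> torch.Size([1, 3, 224, 224])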
| 685
|
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Each slot holds a deque of values; prepend the new value.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 224
| 0
|
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available torch device ("cuda", "mps" or "cpu")."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current wall-clock time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
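# Usage sketch (not part of the original file):
#     device = get_device()        # "cuda", "mps" or "cpu"
#     freeze_params(some_module)   # `some_module` is any torch.nn.Module
#     print(get_timestamp())       # e.g. "14:03:59"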
| 10
|
'''simple docstring'''
def present_value(discount_rate: float, cash_flows: list) -> float:
    """Net present value of `cash_flows`, discounted at `discount_rate` per period."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(npv, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
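# Worked example (not part of the original file): a 10% discount rate with
# cash flows of -1000 today followed by 500 in each of the next three periods:
#     present_value(0.10, [-1000, 500, 500, 500])
# = -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ~= 243.43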
| 10
| 1
|
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])

    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# mapping of task category -> known task ids (the variable name is an
# assumption; the lists are empty in this excerpt)
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument('''readme_filepath''')
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
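# Behaviour sketch (not part of the original file): given a README whose first
# line is "---", _split_yaml_from_readme returns (yaml_block, rest_of_readme);
# otherwise it returns (None, full_text). Round-tripping therefore rewrites
# only the metadata header and preserves everything below it:
#     metadata = DatasetMetadata.from_readme(Path("README.md"))
#     metadata.to_readme(Path("README.md"))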
| 661
|
'''simple docstring'''
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Evaluate base**exponent % modulo_value by recursive squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the power tower base^base^...^base of the given height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)

    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
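# Worked example (not part of the original file): _modexpt(3, 4, 7) evaluates
# (3**4) % 7 = 81 % 7 = 4 without ever materializing the full power, which is
# what makes the tower of 1855 exponentiations above tractable.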
| 565
| 0
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_snake_case = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class UpperCAmelCase_ ( tr.AbstractTransform):
def __init__( self, __a = " "):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = sentence_delimiter
def snake_case__ ( self, __a):
'''simple docstring'''
return list(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int = []
for sent_idx, sentence in enumerate(__a):
chars.extend(self.process_string(__a))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__a) - 1:
chars.append(self.sentence_delimiter)
return chars
_snake_case = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
_snake_case = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_snake_case = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_snake_case = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_snake_case = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
], )
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__a, __a, truth_transform=__a, hypothesis_transform=__a, )["wer"]
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = 0
for prediction, reference in zip(__a, __a):
_lowerCAmelCase : Any = jiwer.compute_measures(
__a, __a, truth_transform=__a, hypothesis_transform=__a, )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
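# Usage sketch, mirroring the doctest in _KWARGS_DESCRIPTION above:
#     import datasets
#     cer = datasets.load_metric("cer")
#     cer.compute(predictions=["this is the prediction", "there is an other sample"],
#                 references=["this is the reference", "there is another one"])
# returns the character error rate as a float (0.3414... for this example).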
| 658
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return `price` with `tax_rate` applied on top."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
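# Worked example: price_plus_tax(100, 0.25) = 100 * 1.25 = 125.0, matching the
# first print above.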
| 658
| 1
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """Find the area of the grid whose rectangle count is closest to `target`.

    An a x b grid contains T(a) * T(b) sub-rectangles, where T(n) is the n-th
    triangle number.
    """
    triangle_numbers: list = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
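# Context (not part of the original file): this is Project Euler problem 85;
# solution() returns the area a * b of the grid whose rectangle count
# T(a) * T(b) is nearest two million.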
| 80
|
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
SCREAMING_SNAKE_CASE_ = [
'''kernels/rwkv/wkv_cuda.cu''',
'''kernels/rwkv/wkv_op.cpp''',
'''kernels/deformable_detr/ms_deform_attn.h''',
'''kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh''',
'''models/graphormer/algos_graphormer.pyx''',
]
def lowercase (_lowerCAmelCase ):
# Test all the extensions added in the setup
for file in FILES_TO_FIND:
if not (transformers_path / file).exists():
return False
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument('''--check_lib''', action='''store_true''', help='''Whether to check the build or the actual package.''')
SCREAMING_SNAKE_CASE_ = parser.parse_args()
if args.check_lib:
SCREAMING_SNAKE_CASE_ = importlib.import_module('''transformers''')
SCREAMING_SNAKE_CASE_ = Path(transformers_module.__file__).parent
else:
SCREAMING_SNAKE_CASE_ = Path.cwd() / '''build/lib/transformers'''
if not test_custom_files_are_present(transformers_path):
raise ValueError('''The built release does not contain the custom files. Fix this before going further!''')
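# Usage sketch (not part of the original script):
#     python check_build.py              # checks build/lib/transformers
#     python check_build.py --check_lib  # checks the installed package instead
# (the filename is an assumption; invoke whatever this file is saved as).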
| 465
| 0
|
"""simple docstring"""
from collections.abc import Generator
def lowercase () -> Generator[int, None, None]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 1
while True:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = b, a + b
yield b
def lowercase (SCREAMING_SNAKE_CASE_ : int = 10_00 ) -> int:
SCREAMING_SNAKE_CASE = 1
SCREAMING_SNAKE_CASE = fibonacci_generator()
while len(str(next(SCREAMING_SNAKE_CASE_ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
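# Worked example: solution(3) == 12, since F(12) = 144 is the first Fibonacci
# number with three digits; with n = 1000 this answers Project Euler problem 25.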
| 327
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    """Configuration for the Decision Transformer, a GPT-2 style model over
    (return, state, action) token sequences."""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4_096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1_024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 327
| 1
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes
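# Example (added): _fetch_dims({"a": torch.zeros(2, 3), "b": [torch.zeros(4)]})
# returns [torch.Size([2, 3]), torch.Size([4])].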
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
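# Example (added): _flat_idx_to_idx(5, (2, 3)) == (1, 2), i.e. flat index 5 in a
# row-major 2x3 grid is row 1, column 2.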
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
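# Illustrative use of chunk_layer (added sketch, not part of the original
# module): apply a toy layer over a [2, 8, 16] input, flattening the two
# leading batch dims (2 * 8 = 16 rows) and processing them 4 at a time.
#   layer = lambda x: {"out": x * 2}
#   out = chunk_layer(layer, {"x": torch.randn(2, 8, 16)}, chunk_size=4, no_batch_dims=2)
#   assert out["out"].shape == (2, 8, 16)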
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
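# Illustrative use of ChunkSizeTuner (added sketch; `run_fn` and its args are
# hypothetical): the tuner binary-searches power-of-two chunk sizes, catching
# RuntimeError (e.g. CUDA OOM) to find the largest size that still runs.
#   tuner = ChunkSizeTuner(max_chunk_size=512)
#   chunk_size = tuner.tune_chunk_size(run_fn, (some_tensor,), min_chunk_size=16)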
| 640
|
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Recursively explore the matrix, tracking the largest square of 1s seen so far."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion, memoized with a DP array."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows + 1) x (cols + 1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Bottom-up DP keeping only two rows of state."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0

        # swap buffers; allocate a fresh row so next_row keeps this row's values
        # (a plain `next_row = current_row` would alias the two lists)
        next_row, current_row = current_row, [0] * (cols + 1)

    return largest_square_area
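# Note (added): despite the "area" in their names, all four variants return the
# side length of the largest all-ones square, e.g. 2 for [[1, 1], [1, 1]].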
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 56
| 0
|
"""simple docstring"""
def min_path_sum(grid: list) -> int:
    """Find the path from top left to bottom right of an array of numbers
    with the lowest possible sum, accumulating sums in place row by row."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """Add to each cell the cheaper of its left neighbour and the cell above."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
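# Worked example (added): min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7,
# following the path 1 -> 3 -> 1 -> 1 -> 1.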
if __name__ == "__main__":
import doctest
doctest.testmod()
| 487
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax implementation of the DDPM (denoising diffusion probabilistic models) scheduler."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ):
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
| 487
| 1
|
def harmonic_series(n_term: str) -> list:
    """Generate the first ``n_term`` terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
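# Example (added for illustration): harmonic_series(5) returns
# ['1', '1/2', '1/3', '1/4', '1/5'].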
| 30
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Min-heap of Node objects with value lookup by name and decrease-key support."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    # this is the min-heapify step
    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)
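# Quick reference (added note): peek() returns the current minimum without
# removing it, remove() pops it with a sift_down, and decrease_key() re-sifts
# the updated node, so after decrease_key(b, -17) the heap's root is B.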
if __name__ == "__main__":
import doctest
doctest.testmod()
| 419
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    """Configuration class for the ViT model."""

    model_type = "vit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 58
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" ConvBERT tokenizer backed by the HuggingFace tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
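# Typical usage (added sketch; requires network access to the HuggingFace Hub):
#   tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tokenizer("Hello world")["input_ids"]  # [CLS] ... [SEP] token ids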
| 58
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
snake_case : Optional[Any] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
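        # NOTE (added): the two data blobs above and below both bind the name
        # `snake_case`, so capture the expected words here before the boxes
        # assignment on the next line overwrites them.
        expected_words = snake_case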
snake_case : List[Any] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 
803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
        # fmt: on
        expected_boxes = snake_case  # the second blob assignment bound the boxes list

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 36
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
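# Example (added): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.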
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall, based on the Blenderbot-90M vocabulary."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)

        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 487
| 0
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Whisper feature extractor: pads/truncates audio and computes log-mel spectrograms."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
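# Typical usage (added sketch): extract 80-bin log-mel features from one second
# of silence at 16 kHz; the result is padded to 30 s, i.e. 3000 frames.
#   fe = WhisperFeatureExtractor()
#   feats = fe([np.zeros(16_000)], sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape  # -> (1, 80, 3000)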
| 621
|
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list, nums2: list) -> float:
    """Return the median of the combined, sorted contents of two arrays."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
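# Worked example (added): median_of_two_arrays([1, 3], [2]) == 2 and
# median_of_two_arrays([1, 2], [3, 4]) == 2.5.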
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 621
| 1
|
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the scheduler's predictor step."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0, torch.zeros_like(t.to(timesteps.device)), self.discrete_sigmas[timesteps - 1].to(timesteps.device), )

    def step_pred(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, generator=None, return_dict: bool = True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(self, model_output: torch.FloatTensor, sample: torch.FloatTensor, generator=None, return_dict: bool = True):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 93
|
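A minimal sketch of driving the variance-exploding scheduler above through one corrector/predictor sweep, using the packaged ScoreSdeVeScheduler; the identity-like score stand-in and the tiny sample shape are placeholders, not part of the scheduler itself.

import torch
from diffusers import ScoreSdeVeScheduler

scheduler = ScoreSdeVeScheduler()
scheduler.set_timesteps(num_inference_steps=10)
scheduler.set_sigmas(num_inference_steps=10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_output = -sample  # stand-in for a learned score network grad_x log p_t(x)
    sample = scheduler.step_correct(model_output, sample).prev_sample  # Langevin corrector
    sample = scheduler.step_pred(model_output, t, sample).prev_sample  # reverse-SDE predictor
print(sample.shape)  # torch.Size([1, 3, 8, 8])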
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
lowercase : Union[str, Any] = 20
lowercase : Optional[int] = model_class_name(SCREAMING_SNAKE_CASE__ )
lowercase : int = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
lowercase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase : str = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Dict = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase : int = model(
input_ids[:, -1:] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=outputs_cache.past_key_values , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Tuple = model(SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
lowercase : str = 20
lowercase : List[Any] = model_class_name(SCREAMING_SNAKE_CASE__ )
lowercase : Any = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowercase : Optional[Any] = model.init_cache(input_ids.shape[0] , SCREAMING_SNAKE_CASE__ )
lowercase : int = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowercase : List[Any] = model(
input_ids[:, :-1] , attention_mask=SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Optional[int] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
lowercase : str = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=SCREAMING_SNAKE_CASE__ , position_ids=SCREAMING_SNAKE_CASE__ , )
lowercase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
lowercase , lowercase , lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
lowercase , lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@tooslow
def __lowerCamelCase ( self ):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
lowercase , lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[int] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase : List[str] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
batch_size, seq_length = pt_inputs["input_ids"].shape
lowercase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase : int = 0
lowercase : List[str] = 1
lowercase : int = 0
lowercase : Optional[Any] = 1
lowercase : Dict = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : int = model_class(SCREAMING_SNAKE_CASE__ , dtype=jnp.float32 )
lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = fx_state
with torch.no_grad():
lowercase : Optional[int] = pt_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
lowercase : Union[str, Any] = fx_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ )
lowercase : Union[str, Any] = fx_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
lowercase , lowercase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowercase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowercase : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowercase : Any = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowercase : Dict = pt_model_class(SCREAMING_SNAKE_CASE__ ).eval()
lowercase : Tuple = model_class(SCREAMING_SNAKE_CASE__ , dtype=jnp.float32 )
lowercase : List[Any] = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , fx_model.params )
batch_size, seq_length = pt_inputs["input_ids"].shape
lowercase : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(SCREAMING_SNAKE_CASE__ ):
lowercase : Dict = 0
lowercase : List[Any] = 1
lowercase : str = 0
lowercase : Tuple = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowercase : List[Any] = pt_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
lowercase : Optional[Any] = fx_model(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
lowercase : int = pt_model_class.from_pretrained(SCREAMING_SNAKE_CASE__ , from_flax=SCREAMING_SNAKE_CASE__ )
with torch.no_grad():
lowercase : Dict = pt_model_loaded(**SCREAMING_SNAKE_CASE__ ).to_tuple()
self.assertEqual(
len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) , '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
lowercase : Dict = model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
lowercase : str = model(np.ones((1, 1) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
| 319
| 0
|
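What the tester above exercises, reduced to a sketch: a tiny random-weight FlaxGPTJModel and a single forward pass. The toy config values mirror the tester's defaults; this is not a real checkpoint and requires flax to be installed.

import jax.numpy as jnp
from transformers import FlaxGPTJModel, GPTJConfig

config = GPTJConfig(vocab_size=99, n_positions=512, n_embd=32, n_layer=4, n_head=4, rotary_dim=4)
model = FlaxGPTJModel(config)  # randomly initialized weights
input_ids = jnp.ones((1, 7), dtype="i4")
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (1, 7, 32)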
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer
    prefix_tokens: List[int] = []
def __init__( self : Optional[int] , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : str=None , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : Tuple="<pad>" , lowerCamelCase__ : List[str]=100 , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict , ) -> Dict:
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ : Optional[Any] = [F"<extra_id_{i}>" for i in range(snake_case_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
UpperCamelCase__ : Dict = len(set(filter(lambda lowerCamelCase__ : bool('''extra_id_''' in str(snake_case_ ) ) , snake_case_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
snake_case_ , tokenizer_file=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , extra_ids=snake_case_ , additional_special_tokens=snake_case_ , **snake_case_ , )
UpperCamelCase__ : Optional[Any] = vocab_file
UpperCamelCase__ : Any = False if not self.vocab_file else True
UpperCamelCase__ : Tuple = extra_ids
@staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , snake_case_ , )
return max_model_length
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(snake_case_ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase__ : Tuple = os.path.join(
snake_case_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ):
copyfile(self.vocab_file , snake_case_ )
logger.info(F"Copy vocab file to {out_vocab_file}" )
return (out_vocab_file,)
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict = None ) -> List[int]:
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
UpperCamelCase__ : Any = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] = None ) -> List[int]:
'''simple docstring'''
UpperCamelCase__ : int = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )
    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
| 720
|
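A usage sketch for the fast tokenizer above: the sentinel tokens come from the extra_ids machinery in __init__, and </s> is appended by build_inputs_with_special_tokens. Assumes a transformers version that ships get_sentinel_tokens, plus network access for the t5-small vocab.

from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")
encoded = tokenizer("The <extra_id_0> walks in <extra_id_1> park")
print(encoded["input_ids"][-1] == tokenizer.eos_token_id)  # True: </s> is appended automatically
print(len(tokenizer.get_sentinel_tokens()))  # 100, one per default extra_id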
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1024,
"moussaKam/barthez": 1024,
"moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple="<s>" , lowerCamelCase__ : Tuple="</s>" , lowerCamelCase__ : int="</s>" , lowerCamelCase__ : Optional[Any]="<s>" , lowerCamelCase__ : List[str]="<unk>" , lowerCamelCase__ : str="<pad>" , lowerCamelCase__ : int="<mask>" , lowerCamelCase__ : Optional[Dict[str, Any]] = None , **lowerCamelCase__ : Any , ) -> None:
'''simple docstring'''
UpperCamelCase__ : str = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
UpperCamelCase__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , )
UpperCamelCase__ : Any = vocab_file
UpperCamelCase__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase__ ) )
UpperCamelCase__ : Any = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
UpperCamelCase__ : Tuple = len(self.sp_model ) - 1
UpperCamelCase__ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ : List[str] = [self.cls_token_id]
UpperCamelCase__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
UpperCamelCase__ : int = [self.sep_token_id]
UpperCamelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
return len(self.sp_model )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Tuple = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self : int , lowerCamelCase__ : str ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCamelCase__ : List[str] = self.sp_model.PieceToId(lowerCamelCase__ )
return spm_id if spm_id else self.unk_token_id
def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : int ) -> List[str]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(lowerCamelCase__ )
def UpperCAmelCase__ ( self : str , lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = []
UpperCamelCase__ : Any = ''''''
UpperCamelCase__ : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowerCamelCase__ ) + token
UpperCamelCase__ : str = True
UpperCamelCase__ : Tuple = []
else:
current_sub_tokens.append(lowerCamelCase__ )
UpperCamelCase__ : Any = False
out_string += self.sp_model.decode(lowerCamelCase__ )
return out_string.strip()
def __getstate__( self : Tuple ) -> Dict:
'''simple docstring'''
UpperCamelCase__ : str = self.__dict__.copy()
UpperCamelCase__ : int = None
return state
def __setstate__( self : Tuple , lowerCamelCase__ : Any ) -> str:
'''simple docstring'''
UpperCamelCase__ : List[Any] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
UpperCamelCase__ : Optional[Any] = {}
UpperCamelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase__ : Any = os.path.join(
lowerCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase__ , '''wb''' ) as fi:
UpperCamelCase__ : int = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase__ )
return (out_vocab_file,)
| 106
| 0
|
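A brief sketch of the SentencePiece tokenizer above; it needs the sentencepiece package installed and downloads the BARThez vocab from the Hub.

from transformers import BarthezTokenizer

tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
ids = tokenizer("Paris est la capitale de la France.")["input_ids"]
# build_inputs_with_special_tokens wraps a single sequence as <s> ... </s>
print(ids[0] == tokenizer.cls_token_id, ids[-1] == tokenizer.sep_token_id)  # True True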
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
'''simple docstring'''
def __init__( self : List[Any] , __snake_case : Tuple , __snake_case : Union[str, Any]=3 , __snake_case : Optional[int]=32 , __snake_case : Optional[Any]=3 , __snake_case : str=10 , __snake_case : int=[8, 16, 32, 64] , __snake_case : Any=[1, 1, 2, 1] , __snake_case : Union[str, Any]=True , __snake_case : Tuple=True , __snake_case : Tuple="relu" , __snake_case : Union[str, Any]=3 , __snake_case : Dict=None , __snake_case : str=["stage2", "stage3", "stage4"] , __snake_case : List[str]=[2, 3, 4] , __snake_case : Optional[Any]=1 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = embeddings_size
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = depths
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
UpperCAmelCase_ = len(lowerCamelCase__ )
UpperCAmelCase_ = out_features
UpperCAmelCase_ = out_indices
UpperCAmelCase_ = num_groups
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : List[str] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCamelCase_ ( self : Any , __snake_case : str , __snake_case : int , __snake_case : Any ):
UpperCAmelCase_ = BitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCamelCase_ ( self : Any , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : str ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = BitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Tuple ):
UpperCAmelCase_ = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCAmelCase_ = None
UpperCAmelCase_ = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCAmelCase_ = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCamelCase_ ( self : Any ):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def lowerCamelCase_ ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self : List[str] ):
return
@unittest.skip(reason='''Bit does not output attentions''' )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def lowerCamelCase_ ( self : str ):
pass
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCamelCase__ )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCamelCase_ ( self : str ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
def lowerCamelCase_ ( self : Tuple ):
def check_hidden_states_output(__snake_case : Any , __snake_case : List[str] , __snake_case : Dict ):
UpperCAmelCase_ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCAmelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCAmelCase_ = layer_type
UpperCAmelCase_ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def lowerCamelCase_ ( self : int ):
pass
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def lowerCamelCase_ ( self : List[Any] ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = BitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )
@slow
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCamelCase__ , return_tensors='''pt''' ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCamelCase__ )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCAmelCase_ = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 144
|
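The inference path the integration test above covers, sketched with the same public checkpoint the test loads; the solid-gray placeholder image will of course yield an arbitrary label.

import torch
from PIL import Image
from transformers import BitForImageClassification, BitImageProcessor

processor = BitImageProcessor.from_pretrained("google/bit-50")
model = BitForImageClassification.from_pretrained("google/bit-50")
image = Image.new("RGB", (224, 224), color="gray")  # placeholder input
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 1000]), the ImageNet-1k classification head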
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 661
| 0
|
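The lazy-import structure above only materializes the torch-backed classes on first attribute access. A sketch of both sides follows, with the caveat that the minimal InformerConfig and the bare instantiation are assumptions about sensible defaults:

from transformers import InformerConfig

config = InformerConfig(prediction_length=24)  # context_length falls back to the prediction length
try:
    from transformers import InformerModel  # resolved through the _LazyModule machinery
    model = InformerModel(config)  # raises ImportError via a dummy object when torch is absent
    print(type(model).__name__)  # InformerModel
except ImportError:
    print("torch not available: only the configuration half of the module is usable")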
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del vae_decoder
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_path""",
type=str,
required=True,
help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""",
)
parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--opset""",
default=14,
type=int,
help="""The version of the ONNX operator set to use.""",
)
parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""")
args = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("""SD: Done: ONNX""")
| 715
|
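A sketch of consuming the exported decoder with onnxruntime. The output directory mirrors the script's layout under an assumed --output_path of sd_onnx; the 1x4x64x64 latent shape (SD v1 decoding to 512 px) and the onnxruntime dependency are likewise assumptions.

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("sd_onnx/vae_decoder/model.onnx", providers=["CPUExecutionProvider"])
latents = np.random.randn(1, 4, 64, 64).astype(np.float32)  # dynamic axes allow sizes other than the traced 25x25
(image,) = session.run(["sample"], {"latent_sample": latents})
print(image.shape)  # (1, 3, 512, 512)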
"""simple docstring"""
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum contiguous-subsequence sum (Kadane's algorithm).

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
n = int(input("Enter number of elements : ").strip())
array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 614
| 0
|
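A few non-interactive checks of max_subsequence_sum above, complementing its doctests; the inputs are arbitrary and assume the function defined in the preceding snippet is in scope.

assert max_subsequence_sum([2, 8, 9]) == 19   # the whole array is the best run
assert max_subsequence_sum([-1, 0, 1]) == 1   # the suffix [0, 1] (or just [1])
assert max_subsequence_sum([0, 0]) == 0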