| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 87–55.2k | int64 0–349 | stringlengths 135–49.1k | int64 0–349 | int64 0–1 |
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_CITATION = '\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
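# A typical invocation of this converter, for reference. The output path is
# illustrative, and the script filename assumes the usual transformers naming
# (convert_deit_timm_to_pytorch.py); network access is needed for the timm
# weights and the label file:
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled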
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    # Prefix text to help Transformer-XL and XLNet with short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {"generated_text": all_text}
            records.append(record)

        return records
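# For context, this pipeline is normally reached through the `pipeline` factory.
# A minimal sketch, assuming a GPT-2 checkpoint is available; the prompt and
# generation arguments are illustrative:
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="gpt2")
#   outputs = generator(
#       "Hello, I'm a language model,",
#       max_new_tokens=20,
#       do_sample=True,
#       num_return_sequences=2,
#   )
#   for out in outputs:
#       print(out["generated_text"])  # each item is a dict built by postprocess() above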
import random
def _partition(data: list, pivot) -> tuple:
    # Three-way partition of `data` around `pivot`.
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
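# A brief usage sketch (not part of the original snippet): the median of an
# unsorted list is the middle order statistic, so quick_select finds it directly.
if __name__ == "__main__":
    items = [2, 4, 5, 7, 899, 54, 32]
    median_index = len(items) // 2  # index 3 once the list is sorted
    print(quick_select(items, median_index))  # 7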
import string
def decrypt(message: str) -> None:
    # Brute-force every Caesar shift and print each candidate plaintext.
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    # Fold lcm over 1..n to get the smallest number divisible by all of them.
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
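# Quick sanity check of the fold above (my addition): the smallest number evenly
# divisible by every integer from 1 to 10 is 2520.
assert solution(10) == 2520  # lcm(1, 2, ..., 10)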
if __name__ == "__main__":
print(f'''{solution() = }''')
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init-weights function to keep the class compatible with the library.
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
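# A typical invocation, for reference. The paths are illustrative, and the
# filename assumes the usual transformers naming
# (convert_t5_original_tf_checkpoint_to_pytorch.py):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt \
#       --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch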
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
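# A minimal usage sketch via the `pipeline` factory; the checkpoint name is
# illustrative, and the output is a nested list shaped [batch, tokens, hidden_size]:
#
#   from transformers import pipeline
#
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("This is a test.")
#   print(len(features[0]), len(features[0][0]))  # token count, hidden size (768 for DistilBERT)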
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm'] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutxlm_fast'] = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}


class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
def naive_pattern_search(s: str, pattern: str) -> list:
    # Slide the pattern over the text and record every index where all characters match.
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Start from pure noise with the UNet's expected sample shape.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        # Run a single denoising step.
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # Collapse the result to a tensor of ones with the sample's shape.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
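# A sketch of driving this one-step pipeline with concrete diffusers components.
# UNet2DModel and DDPMScheduler are standard diffusers classes; the tiny,
# untrained configuration is purely illustrative:
#
#   from diffusers import DDPMScheduler, UNet2DModel
#
#   unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)  # untrained toy model
#   scheduler = DDPMScheduler()
#   pipe = CustomPipeline(unet=unet, scheduler=scheduler)
#   result = pipe()
#   print(result.shape)  # torch.Size([1, 3, 32, 32]), a tensor of ones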
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
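# Quick check of the geometric fact behind the counting loop (my addition): the
# shortest surface path over a cuboid with longest side M and remaining sides
# summing to s unfolds to the hypotenuse sqrt(M**2 + s**2); for the classic
# 6x5x3 cuboid that is sqrt(36 + 64) == 10 exactly.
assert sqrt(6**2 + (5 + 3) ** 2) == 10.0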
if __name__ == "__main__":
print(f'''{solution() = }''')
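# Why the increment above counts cuboids (a short derivation, added for
# clarity): fix the longest side c = max_cuboid_size and let s = a + b be the
# sum of the two shorter sides. The shortest surface path has length
# sqrt(s**2 + c**2), so every pair (a, b) with a <= b, a + b = s, and
# 1 <= a <= b <= c yields the same integer path. The constraint a <= b gives
# a <= s // 2, and b <= c gives a >= s - c, hence the pair count is
# min(c, s // 2) - max(1, s - c) + 1.
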
import logging
import os
from typing import List, Tuple

import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever


logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)

        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)

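# A stripped-down sketch of the gather -> compute-on-rank-0 -> scatter pattern
# used by `retrieve` above, assuming `dist.init_process_group` has already run
# with a backend that supports gather/scatter (e.g. gloo). The "+ 1" stands in
# for the real retrieval work and is purely illustrative.
def _gather_scatter_demo(local_batch: torch.Tensor, group=None) -> torch.Tensor:
    world_size = dist.get_world_size(group=group)
    rank = dist.get_rank(group=group)
    # only the destination rank supplies a gather_list
    gather_list = [torch.empty_like(local_batch) for _ in range(world_size)] if rank == 0 else None
    dist.gather(local_batch, gather_list=gather_list, dst=0, group=group)
    # "work" happens on rank 0, then results are scattered back
    scatter_list = [t + 1 for t in gather_list] if rank == 0 else None
    out = torch.empty_like(local_batch)
    dist.scatter(out, scatter_list=scatter_list, src=0, group=group)
    return out
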
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = """sequence-classification"""
def __init__( self , UpperCamelCase__ ) -> List[Any]:
if type(UpperCamelCase__ ) == dict:
lowerCamelCase : int = Namespace(**UpperCamelCase__ )
lowerCamelCase : str = glue_output_modes[hparams.task]
lowerCamelCase : int = glue_tasks_num_labels[hparams.task]
super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode )
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
return self.model(**UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Optional[int] = self(**UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = outputs[0]
lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"]
lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowercase ( self ) -> str:
lowerCamelCase : Any = self.hparams
lowerCamelCase : Union[str, Any] = processors[args.task]()
lowerCamelCase : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowerCamelCase : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
lowerCamelCase : Dict = convert_examples_to_features(
UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , UpperCamelCase__ )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
lowerCamelCase : str = "dev" if mode == "test" else mode
lowerCamelCase : int = self._feature_file(UpperCamelCase__ )
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
lowerCamelCase : str = torch.load(UpperCamelCase__ )
lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Dict = self(**UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Any = outputs[:2]
lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy()
lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self , UpperCamelCase__ ) -> tuple:
lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : str = np.squeeze(UpperCamelCase__ )
lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )}
lowerCamelCase : List[str] = dict(results.items() )
lowerCamelCase : Optional[int] = results
return ret, preds_list, out_label_list
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def A ( ) -> int:
lowerCamelCase : int = argparse.ArgumentParser()
add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCamelCase : int = os.path.join(
"./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,)
os.makedirs(args.output_dir )
lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
class Node:
    # Binary search tree node
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        if self.val:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val


def inorder(root, res):
    # Recursive in-order traversal, appending values to res
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)


def tree_sort(arr):
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for i in range(1, len(arr)):
        root.insert(arr[i])
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))

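# A brief note on complexity (added for context): building the BST costs
# O(n log n) on average but degrades to O(n**2) when the input is already
# sorted, because every insert walks down one long spine. Quick checks:
#
#     tree_sort([])            # []
#     tree_sort([5])           # [5]
#     tree_sort([3, 2, 1])     # [1, 2, 3] -- worst-case degenerate tree
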
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) ~ h/2 * (f(x_0) + 2*f(x_1) + ... + 2*f(x_{n-1}) + f(x_n))
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # interior points carry full weight h
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    # Generate the interior sample points a+h, a+2h, ...
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()

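# A quick numeric check (added for context): with f(x) = x**2 on [0, 1] and
# steps = 10, main() prints y ≈ 0.335 against the exact integral 1/3, and the
# trapezoidal error shrinks as O(h**2). One caveat: the stop condition
# `x < (b - h)` in make_points only emits the last interior point because
# floating-point drift makes 0.9 arrive as 0.8999...; with exact arithmetic
# step sizes one term would be dropped.
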
import json
import os
import unittest

from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow

from ...test_tokenization_common import TokenizerTesterMixin


class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]

def solution(limit: int = 1000000) -> int:
    """
    Return the starting number below ``limit`` that produces the longest
    Collatz chain (Project Euler problem 14), memoizing chain lengths in
    ``counters`` so shared suffixes are never recomputed.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))

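# An alternative sketch (not part of the original solution) that gets the same
# memoization from functools.lru_cache. Recursion is safe here because the
# longest chain below one million is 525 terms, well under Python's default
# recursion limit, but that is an assumption worth rechecking for larger bounds.
from functools import lru_cache


@lru_cache(maxsize=None)
def collatz_chain_length(n: int) -> int:
    if n == 1:
        return 1
    return 1 + collatz_chain_length(n // 2 if n % 2 == 0 else 3 * n + 1)
# max(range(2, 1000000), key=collatz_chain_length) agrees with solution(1000000)
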
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
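
# Added note: this rounds each pixel dimension up to a multiple of
# scale_factor**2 and then divides by the movq scale factor, so with the
# defaults get_new_h_w(512, 512, 8) -> (64, 64), the latent spatial size.
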
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Any:
super().__init__()
self.register_modules(
text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , )
lowerCamelCase : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
if latents is None:
lowerCamelCase : Dict = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowerCamelCase : str = latents.to(UpperCamelCase__ )
lowerCamelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , ) -> Tuple:
lowerCamelCase : int = len(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else 1
# get prompt text embeddings
lowerCamelCase : Optional[int] = self.tokenizer(
UpperCamelCase__ , padding="max_length" , truncation=UpperCamelCase__ , max_length=77 , return_attention_mask=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="pt" , )
lowerCamelCase : str = text_inputs.input_ids
lowerCamelCase : Dict = self.tokenizer(UpperCamelCase__ , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : str = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase : int = text_input_ids.to(UpperCamelCase__ )
lowerCamelCase : int = text_inputs.attention_mask.to(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.text_encoder(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowerCamelCase : Any = prompt_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : str = text_encoder_hidden_states.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : List[str] = text_mask.repeat_interleave(UpperCamelCase__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase : List[str]
if negative_prompt is None:
lowerCamelCase : Optional[int] = [""] * batch_size
elif type(UpperCamelCase__ ) is not type(UpperCamelCase__ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase__ )} !='''
F''' {type(UpperCamelCase__ )}.''' )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Any = [negative_prompt]
elif batch_size != len(UpperCamelCase__ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase__ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
lowerCamelCase : int = negative_prompt
lowerCamelCase : str = self.tokenizer(
UpperCamelCase__ , padding="max_length" , max_length=77 , truncation=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="pt" , )
lowerCamelCase : Optional[int] = uncond_input.input_ids.to(UpperCamelCase__ )
lowerCamelCase : Any = uncond_input.attention_mask.to(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Any = self.text_encoder(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase : Tuple = negative_prompt_embeds.shape[1]
lowerCamelCase : str = negative_prompt_embeds.repeat(1 , UpperCamelCase__ )
lowerCamelCase : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase__ )
lowerCamelCase : int = uncond_text_encoder_hidden_states.shape[1]
lowerCamelCase : str = uncond_text_encoder_hidden_states.repeat(1 , UpperCamelCase__ , 1 )
lowerCamelCase : List[str] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , UpperCamelCase__ , -1 )
lowerCamelCase : Any = uncond_text_mask.repeat_interleave(UpperCamelCase__ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCamelCase : Union[str, Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCamelCase : List[Any] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def _lowercase ( self , UpperCamelCase__=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase : Union[str, Any] = torch.device(F'''cuda:{gpu_id}''' )
lowerCamelCase : Any = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowerCamelCase : List[Any] = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=UpperCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase : str = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCamelCase , lowerCamelCase : Union[str, Any] = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
if self.safety_checker is not None:
lowerCamelCase , lowerCamelCase : int = cpu_offload_with_hook(self.safety_checker , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
# We'll offload the last model manually.
lowerCamelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self ) -> List[Any]:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 100 , UpperCamelCase__ = 4.0 , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> Union[str, Any]:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Optional[int] = 1
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Any = len(UpperCamelCase__ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__ )}''' )
lowerCamelCase : Tuple = self._execution_device
lowerCamelCase : int = batch_size * num_images_per_prompt
lowerCamelCase : Dict = guidance_scale > 1.0
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = self._encode_prompt(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : int = torch.cat(UpperCamelCase__ , dim=0 )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch.cat(UpperCamelCase__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase : List[str] = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=UpperCamelCase__ )
self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
lowerCamelCase : int = self.scheduler.timesteps
lowerCamelCase : Optional[Any] = self.unet.config.in_channels
lowerCamelCase , lowerCamelCase : List[str] = get_new_h_w(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )
# create initial latent
lowerCamelCase : List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase : str = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowerCamelCase : List[Any] = self.unet(
sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase , lowerCamelCase : Tuple = noise_pred.chunk(2 )
lowerCamelCase , lowerCamelCase : Tuple = variance_pred.chunk(2 )
lowerCamelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase , lowerCamelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : Optional[int] = self.scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , ).prev_sample
# post-processing
lowerCamelCase : Any = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowerCamelCase : List[str] = image * 0.5 + 0.5
lowerCamelCase : Union[str, Any] = image.clamp(0 , 1 )
lowerCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase : Union[str, Any] = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            # Mapping entries sit 8 spaces past the indentation of the intro line
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)

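# A quick illustration (hypothetical input strings, added for clarity) of what
# the two patterns above match:
#
#     _re_intro_mapping.search("MODEL_MAPPING_NAMES = OrderedDict(")   # hit
#     _re_identifier.search('        ("albert", "AlbertConfig"),').groups()[0]
#     # -> "albert", the key each mapping block is sorted by
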
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

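# A minimal sketch of the lazy-import idea behind `_LazyModule` (an
# illustration under assumptions, not the real transformers implementation):
# attribute access triggers the actual import of whichever submodule exports
# the requested name, so merely importing the package stays cheap.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__} has no attribute {attr}")
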
def harmonic_series(n_term: str) -> list:
    """
    Return the harmonic series as strings up to the nth term.

    >>> harmonic_series("5")
    ['1', '1/2', '1/3', '1/4', '1/5']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))

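# A small companion sketch (not in the original): summing the series exactly
# with fractions.Fraction instead of returning strings.
from fractions import Fraction


def harmonic_sum(n: int) -> Fraction:
    # e.g. harmonic_sum(4) == Fraction(25, 12)
    return sum((Fraction(1, k) for k in range(1, n + 1)), Fraction(0))
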
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    # Fetch the ids of the current top stories, then resolve each one
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())

import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : str = XLMRobertaTokenizer
lowerCamelCase_ : str = XLMRobertaTokenizerFast
lowerCamelCase_ : int = True
lowerCamelCase_ : List[Any] = True
def _lowercase ( self ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : Any = XLMRobertaTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Optional[int] = "<pad>"
lowerCamelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(UpperCamelCase__ ) , 1002 )
def _lowercase ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size , 1002 )
def _lowercase ( self ) -> Any:
lowerCamelCase : List[str] = XLMRobertaTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
lowerCamelCase : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _lowercase ( self ) -> Any:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Any = self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Optional[Any] = tempfile.mkdtemp()
lowerCamelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase__ )
lowerCamelCase : Tuple = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
lowerCamelCase : Any = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
lowerCamelCase : int = tokenizer_r.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Any = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase : str = tempfile.mkdtemp()
lowerCamelCase : int = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
lowerCamelCase : List[str] = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )
# Checks everything loads correctly in the same way
lowerCamelCase : Optional[int] = tokenizer_r.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Any = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase : List[str] = tempfile.mkdtemp()
lowerCamelCase : Any = tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
lowerCamelCase : int = tokenizer_p.save_pretrained(UpperCamelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase : List[str] = tokenizer_r.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
shutil.rmtree(UpperCamelCase__ )
@cached_property
def _lowercase ( self ) -> List[str]:
return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base" )
def _lowercase ( self ) -> List[Any]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCamelCase__ , f.name )
lowerCamelCase : Optional[Any] = XLMRobertaTokenizer(f.name , keep_accents=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = pickle.dumps(UpperCamelCase__ )
pickle.loads(UpperCamelCase__ )
def _lowercase ( self ) -> int:
if not self.test_rust_tokenizer:
return
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : int = "I was born in 92000, and this is falsé."
lowerCamelCase : List[str] = tokenizer.tokenize(UpperCamelCase__ )
lowerCamelCase : List[Any] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(UpperCamelCase__ )
lowerCamelCase : int = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def _lowercase ( self ) -> Dict:
lowerCamelCase : str = "Hello World!"
lowerCamelCase : Optional[Any] = [0, 3_5378, 6661, 38, 2]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def _lowercase ( self ) -> int:
lowerCamelCase : List[str] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowerCamelCase : Union[str, Any] = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
# xlmr.eval()
# xlmr.encode(symbols)
self.assertListEqual(UpperCamelCase__ , self.big_tokenizer.encode(UpperCamelCase__ ) )
@slow
def _lowercase ( self ) -> Any:
# fmt: off
lowerCamelCase : int = {"input_ids": [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="xlm-roberta-base" , revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3" , )
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model"""
def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]:
super().__init__(**UpperCamelCase__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Dict = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Union[str, Any] = attention_dropout
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : str = qkv_bias
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = """blip_2_qformer"""
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int:
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : int = hidden_act
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Dict = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : List[str] = initializer_range
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = position_embedding_type
lowerCamelCase : Tuple = cross_attention_frequency
lowerCamelCase : Optional[int] = encoder_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """blip-2"""
lowerCamelCase_ : int = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str:
super().__init__(**UpperCamelCase__ )
if vision_config is None:
lowerCamelCase : List[Any] = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
lowerCamelCase : List[Any] = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
lowerCamelCase : Any = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ )
lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ )
lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings
lowerCamelCase : int = self.text_config.is_encoder_decoder
lowerCamelCase : Optional[Any] = num_query_tokens
lowerCamelCase : int = self.vision_config.hidden_size
lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase : Dict = 1.0
lowerCamelCase : List[Any] = 0.02
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase : Tuple = self.vision_config.to_dict()
lowerCamelCase : int = self.qformer_config.to_dict()
lowerCamelCase : Optional[Any] = self.text_config.to_dict()
lowerCamelCase : int = self.__class__.model_type
return output
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    # Check whether number is a perfect square
    sq: int = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    # Return the sum x + y + z of three fractions in lowest terms
    top: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom: int = x_den * y_den * z_den
    hcf: int = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """
    For every golden triple (x, y, z) of the given order, i.e. rational
    solutions of x**n + y**n = z**n with n in {1, 2, -1, -2}, collect the
    distinct sums s = x + y + z and return numerator + denominator of their
    total (Project Euler problem 180).
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den
                        )
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den
                            )
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")

import random

from .binary_exp_mod import bin_exp_mod


def is_prime_big(n, prec=1000):
    """Miller-Rabin probabilistic primality test with ``prec`` random rounds."""
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # n is odd: write n - 1 = d * 2**exp with d odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
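# Sketch of a direct check (left as a comment because of the relative import
# above): 2**61 - 1 is a Mersenne prime, and with the default prec=1000 rounds
# the false-positive probability of Miller-Rabin is at most 4**-1000.
#
#     assert is_prime_big(2**61 - 1)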
def climb_stairs(number_of_steps: int) -> int:
    """
    Count the distinct ways to climb ``number_of_steps`` stairs,
    taking 1 or 2 steps at a time.

    >>> climb_stairs(3)
    3
    >>> climb_stairs(4)
    5
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
# Mask token behaves like a normal word, i.e. includes the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
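# Hedged usage sketch: the tokenizer above appears to mirror
# ``transformers.XLNetTokenizer`` (sentencepiece-backed, left padding,
# sequences built as ``A <sep> <cls>`` / ``A <sep> B <sep> <cls>``).
# Under that assumption:
#
#     from transformers import XLNetTokenizer
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     tokenizer("Hello world")["input_ids"]  # ends with sep_token_id, cls_token_id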
def is_balanced(s: str) -> bool:
    """Return True if every bracket in ``s`` is matched and properly nested."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
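# Spot checks for the matcher above (run at import time):
assert is_balanced("([]{})")
assert not is_balanced("(]")
assert not is_balanced("((")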
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
'b0': efficientnet.EfficientNetB0,
'b1': efficientnet.EfficientNetB1,
'b2': efficientnet.EfficientNetB2,
'b3': efficientnet.EfficientNetB3,
'b4': efficientnet.EfficientNetB4,
'b5': efficientnet.EfficientNetB5,
'b6': efficientnet.EfficientNetB6,
'b7': efficientnet.EfficientNetB7,
}
SCREAMING_SNAKE_CASE__ : Any = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : int = EfficientNetConfig()
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"]
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"]
lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"]
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"]
lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"]
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : Any = 1000
lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
return config
def A ( ) -> int:
lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : str = EfficientNetImageProcessor(
size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,)
return preprocessor
def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )}
lowerCamelCase : List[Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowerCamelCase : Optional[int] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowerCamelCase : List[str] = "efficientnet." + item[1]
lowerCamelCase : int = "classifier.weight"
lowerCamelCase : Union[str, Any] = "classifier.bias"
return key_mapping
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowerCamelCase : Tuple = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowerCamelCase : Optional[int] = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,)
lowerCamelCase : List[Any] = original_model.trainable_variables
lowerCamelCase : Tuple = original_model.non_trainable_variables
lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowerCamelCase : List[str] = param.numpy()
lowerCamelCase : int = list(tf_params.keys() )
# Load HuggingFace model
lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowerCamelCase : Tuple = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = outputs.logits.detach().numpy()
# Original model inference
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 )
lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
lowerCamelCase : int = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
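# Example invocation (the script filename is illustrative):
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model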
def binary_count_setbits(a: int) -> int:
    """
    Count the set bits in the binary representation of ``a``.

    >>> binary_count_setbits(25)
    3
    """
    if a < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(a, float):
        raise TypeError("Input value must be a 'int' type")
    return bin(a).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
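# Note: on Python >= 3.10 the same count is available natively as
# ``int.bit_count()``, e.g. ``(25).bit_count() == 3``.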
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]:
if config_name_or_path is None:
lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
lowerCamelCase : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase : Any = question_encoder_name_or_path
lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = gen_config
lowerCamelCase : Optional[Any] = question_encoder_config
lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizers.
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
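# Example invocation (the script filename is illustrative; the model ids are
# the usual RAG generator/question-encoder components):
#
#     python consolidate_rag_checkpoint.py --model_type rag_token \
#         --dest ./rag-token-checkpoint \
#         --generator_name_or_path facebook/bart-large \
#         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base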
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = ["""input_values""", """attention_mask"""]
def __init__( self , UpperCamelCase__ = 1 , UpperCamelCase__ = 1_6000 , UpperCamelCase__ = 0.0 , UpperCamelCase__ = False , UpperCamelCase__ = 80 , UpperCamelCase__ = 16 , UpperCamelCase__ = 64 , UpperCamelCase__ = "hann_window" , UpperCamelCase__ = 1.0 , UpperCamelCase__ = 80 , UpperCamelCase__ = 7600 , UpperCamelCase__ = 1e-10 , UpperCamelCase__ = 2 , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> Dict:
super().__init__(feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Any = do_normalize
lowerCamelCase : Tuple = return_attention_mask
lowerCamelCase : Optional[Any] = num_mel_bins
lowerCamelCase : Optional[int] = hop_length
lowerCamelCase : Dict = win_length
lowerCamelCase : Any = win_function
lowerCamelCase : Any = frame_signal_scale
lowerCamelCase : int = fmin
lowerCamelCase : int = fmax
lowerCamelCase : Optional[int] = mel_floor
lowerCamelCase : Any = reduction_factor
lowerCamelCase : Tuple = win_length * sampling_rate // 1000
lowerCamelCase : int = hop_length * sampling_rate // 1000
lowerCamelCase : int = optimal_fft_length(self.sample_size )
lowerCamelCase : List[str] = (self.n_fft // 2) + 1
lowerCamelCase : List[str] = window_function(window_length=self.sample_size , name=self.win_function , periodic=UpperCamelCase__ )
lowerCamelCase : int = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm="slaney" , mel_scale="slaney" , )
if frame_signal_scale != 1.0:
warnings.warn(
"The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase__ , )
if reduction_factor != 2.0:
warnings.warn(
"The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers" , UpperCamelCase__ , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def _lowercase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
lowerCamelCase : List[Any] = np.array(UpperCamelCase__ , np.int32 )
lowerCamelCase : str = []
for vector, length in zip(UpperCamelCase__ , attention_mask.sum(-1 ) ):
lowerCamelCase : Union[str, Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
lowerCamelCase : List[Any] = padding_value
normed_input_values.append(UpperCamelCase__ )
else:
lowerCamelCase : str = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def _lowercase ( self , UpperCamelCase__ , ) -> np.ndarray:
lowerCamelCase : Optional[int] = spectrogram(
UpperCamelCase__ , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel="log10" , )
return log_mel_spec.T
def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchFeature:
if audio is None and audio_target is None:
raise ValueError("You must provide either `audio` or `audio_target` values." )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if audio is not None:
lowerCamelCase : Dict = self._process_audio(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , )
else:
lowerCamelCase : Dict = None
if audio_target is not None:
lowerCamelCase : Optional[int] = self._process_audio(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , )
if inputs is None:
return inputs_target
else:
lowerCamelCase : Optional[Any] = inputs_target["input_values"]
lowerCamelCase : List[str] = inputs_target.get("attention_mask" )
if decoder_attention_mask is not None:
lowerCamelCase : Dict = decoder_attention_mask
return inputs
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchFeature:
lowerCamelCase : Dict = isinstance(UpperCamelCase__ , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
lowerCamelCase : Optional[int] = is_batched_numpy or (
isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase : Dict = [np.asarray(UpperCamelCase__ , dtype=np.float32 ) for speech in speech]
elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ):
lowerCamelCase : Optional[int] = np.asarray(UpperCamelCase__ , dtype=np.float32 )
elif isinstance(UpperCamelCase__ , np.ndarray ) and speech.dtype is np.dtype(np.float64 ):
lowerCamelCase : str = speech.astype(np.float32 )
# always return batch
if not is_batched:
lowerCamelCase : List[Any] = [speech]
# needed to make pad() work on spectrogram inputs
lowerCamelCase : Any = self.feature_size
# convert into correct format for padding
if is_target:
lowerCamelCase : List[Any] = [self._extract_mel_features(UpperCamelCase__ ) for waveform in speech]
lowerCamelCase : Union[str, Any] = BatchFeature({"input_values": features} )
lowerCamelCase : Any = self.num_mel_bins
else:
lowerCamelCase : List[str] = BatchFeature({"input_values": speech} )
lowerCamelCase : Tuple = self.pad(
UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
lowerCamelCase : Optional[int] = feature_size_hack
# convert input values to correct format
lowerCamelCase : Optional[Any] = padded_inputs["input_values"]
if not isinstance(input_values[0] , np.ndarray ):
lowerCamelCase : Any = [np.asarray(UpperCamelCase__ , dtype=np.float32 ) for array in input_values]
elif (
not isinstance(UpperCamelCase__ , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.float64 )
):
lowerCamelCase : Any = [array.astype(np.float32 ) for array in input_values]
elif isinstance(UpperCamelCase__ , np.ndarray ) and input_values.dtype is np.dtype(np.float64 ):
lowerCamelCase : int = input_values.astype(np.float32 )
# convert attention_mask to correct format
lowerCamelCase : Any = padded_inputs.get("attention_mask" )
if attention_mask is not None:
lowerCamelCase : Dict = [np.asarray(UpperCamelCase__ , dtype=np.int32 ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
lowerCamelCase : Any = (
attention_mask
if self._get_padding_strategies(UpperCamelCase__ , max_length=UpperCamelCase__ ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowerCamelCase : Any = self.zero_mean_unit_var_norm(
padded_inputs["input_values"] , attention_mask=UpperCamelCase__ , padding_value=self.padding_value )
if return_tensors is not None:
lowerCamelCase : Tuple = padded_inputs.convert_to_tensors(UpperCamelCase__ )
return padded_inputs
def _lowercase ( self ) -> Dict[str, Any]:
lowerCamelCase : Optional[int] = super().to_dict()
# Don't serialize these as they are derived from the other properties.
lowerCamelCase : Dict = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
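# Hedged usage sketch: the extractor above appears to mirror
# ``transformers.SpeechT5FeatureExtractor`` (raw waveforms as inputs, log-mel
# spectrograms for targets). Under that assumption:
#
#     import numpy as np
#     extractor = SpeechT5FeatureExtractor()
#     wav = np.zeros(16000, dtype=np.float32)
#     inputs = extractor(audio=wav, sampling_rate=16000, return_tensors="np")
#     inputs["input_values"].shape  # (1, 16000) for raw audio input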
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate real power from apparent power and power factor.

    >>> real_power(100, 0.9)
    90.0
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """
    Calculate reactive power from apparent power and power factor.

    >>> reactive_power(100, 0.9)
    43.58898943540673
    """
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
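# Worked example: 100 VA at power factor 0.8 splits into 80 W real power and
# 60 var reactive power (a 3-4-5 right triangle).
assert real_power(100, 0.8) == 80.0
assert round(reactive_power(100, 0.8), 9) == 60.0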
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : bool = field(default=lowerCAmelCase__ , metadata={"""help""": """Whether to use SortishSampler or not."""} )
lowerCamelCase_ : bool = field(
default=lowerCAmelCase__ , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""} )
lowerCamelCase_ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `max_length` value of the model configuration."""
)
} , )
lowerCamelCase_ : Optional[int] = field(
default=lowerCAmelCase__ , metadata={
"""help""": (
"""The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
"""to the `num_beams` value of the model configuration."""
)
} , )
lowerCamelCase_ : Optional[Union[str, Path, GenerationConfig]] = field(
default=lowerCAmelCase__ , metadata={
"""help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
} , )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : List[Any] = super().to_dict()
for k, v in d.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Dict = v.to_dict()
return d
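# Hedged usage sketch: this dataclass appears to mirror
# ``transformers.Seq2SeqTrainingArguments``. Under that assumption:
#
#     args = Seq2SeqTrainingArguments(
#         output_dir="out",
#         predict_with_generate=True,
#         generation_max_length=128,
#         generation_num_beams=4,
#     )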
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = val
def A ( ) -> List[str]:
lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
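# Example invocation (the script filename is illustrative):
#
#     python convert_deit_timm_to_pytorch.py \
#         --deit_name vit_deit_base_distilled_patch16_224 \
#         --pytorch_dump_folder_path ./deit-base-distilled-patch16-224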
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1 ) -> Optional[int]:
if n_shave_prefix_segments >= 0:
return ".".join(path.split("." )[n_shave_prefix_segments:] )
else:
return ".".join(path.split("." )[:n_shave_prefix_segments] )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=0 ) -> Optional[int]:
lowerCamelCase : Optional[Any] = []
for old_item in old_list:
lowerCamelCase : Dict = old_item.replace("in_layers.0" ,"norm1" )
lowerCamelCase : int = new_item.replace("in_layers.2" ,"conv1" )
lowerCamelCase : Any = new_item.replace("out_layers.0" ,"norm2" )
lowerCamelCase : Optional[Any] = new_item.replace("out_layers.3" ,"conv2" )
lowerCamelCase : List[Any] = new_item.replace("emb_layers.1" ,"time_emb_proj" )
lowerCamelCase : int = new_item.replace("skip_connection" ,"conv_shortcut" )
lowerCamelCase : Optional[int] = shave_segments(_SCREAMING_SNAKE_CASE ,n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
lowerCamelCase : List[Any] = []
for old_item in old_list:
lowerCamelCase : int = old_item
lowerCamelCase : int = new_item.replace("norm.weight" ,"group_norm.weight" )
lowerCamelCase : Optional[Any] = new_item.replace("norm.bias" ,"group_norm.bias" )
lowerCamelCase : Union[str, Any] = new_item.replace("proj_out.weight" ,"proj_attn.weight" )
lowerCamelCase : int = new_item.replace("proj_out.bias" ,"proj_attn.bias" )
lowerCamelCase : List[Any] = shave_segments(_SCREAMING_SNAKE_CASE ,n_shave_prefix_segments=_SCREAMING_SNAKE_CASE )
mapping.append({"old": old_item, "new": new_item} )
return mapping
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ) -> Dict:
assert isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
lowerCamelCase : Tuple = old_checkpoint[path]
lowerCamelCase : Optional[int] = old_tensor.shape[0] // 3
lowerCamelCase : Dict = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
lowerCamelCase : List[str] = old_tensor.shape[0] // config["num_head_channels"] // 3
lowerCamelCase : Tuple = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = old_tensor.split(channels // num_heads ,dim=1 )
lowerCamelCase : int = query.reshape(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = key.reshape(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = value.reshape(_SCREAMING_SNAKE_CASE )
for path in paths:
lowerCamelCase : Optional[Any] = path["new"]
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
lowerCamelCase : Optional[int] = new_path.replace("middle_block.0" ,"mid_block.resnets.0" )
lowerCamelCase : Optional[int] = new_path.replace("middle_block.1" ,"mid_block.attentions.0" )
lowerCamelCase : Optional[Any] = new_path.replace("middle_block.2" ,"mid_block.resnets.1" )
if additional_replacements is not None:
for replacement in additional_replacements:
lowerCamelCase : str = new_path.replace(replacement["old"] ,replacement["new"] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
lowerCamelCase : Optional[Any] = old_checkpoint[path["old"]][:, :, 0]
else:
lowerCamelCase : int = old_checkpoint[path["old"]]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> List[str]:
lowerCamelCase : Union[str, Any] = {}
lowerCamelCase : str = checkpoint["time_embed.0.weight"]
lowerCamelCase : str = checkpoint["time_embed.0.bias"]
lowerCamelCase : Tuple = checkpoint["time_embed.2.weight"]
lowerCamelCase : Any = checkpoint["time_embed.2.bias"]
lowerCamelCase : Any = checkpoint["input_blocks.0.0.weight"]
lowerCamelCase : Tuple = checkpoint["input_blocks.0.0.bias"]
lowerCamelCase : Dict = checkpoint["out.0.weight"]
lowerCamelCase : Dict = checkpoint["out.0.bias"]
lowerCamelCase : Union[str, Any] = checkpoint["out.2.weight"]
lowerCamelCase : Optional[int] = checkpoint["out.2.bias"]
# Retrieves the keys for the input blocks only
lowerCamelCase : List[Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "input_blocks" in layer} )
lowerCamelCase : Any = {
layer_id: [key for key in checkpoint if f'''input_blocks.{layer_id}''' in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the middle blocks only
lowerCamelCase : str = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "middle_block" in layer} )
lowerCamelCase : str = {
layer_id: [key for key in checkpoint if f'''middle_block.{layer_id}''' in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the output blocks only
lowerCamelCase : List[Any] = len({".".join(layer.split("." )[:2] ) for layer in checkpoint if "output_blocks" in layer} )
lowerCamelCase : int = {
layer_id: [key for key in checkpoint if f'''output_blocks.{layer_id}''' in key]
for layer_id in range(_SCREAMING_SNAKE_CASE )
}
for i in range(1 ,_SCREAMING_SNAKE_CASE ):
lowerCamelCase : int = (i - 1) // (config["num_res_blocks"] + 1)
lowerCamelCase : int = (i - 1) % (config["num_res_blocks"] + 1)
lowerCamelCase : Any = [key for key in input_blocks[i] if f'''input_blocks.{i}.0''' in key]
lowerCamelCase : Optional[Any] = [key for key in input_blocks[i] if f'''input_blocks.{i}.1''' in key]
if f'''input_blocks.{i}.0.op.weight''' in checkpoint:
lowerCamelCase : List[str] = checkpoint[
f'''input_blocks.{i}.0.op.weight'''
]
lowerCamelCase : Dict = checkpoint[
f'''input_blocks.{i}.0.op.bias'''
]
continue
        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])
                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
SCREAMING_SNAKE_CASE__ : List[Any] = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
SCREAMING_SNAKE_CASE__ : int = json.loads(f.read())
SCREAMING_SNAKE_CASE__ : str = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
SCREAMING_SNAKE_CASE__ : List[Any] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ : int = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
SCREAMING_SNAKE_CASE__ : Union[str, Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
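# Illustrative invocation of the converter above; the paths are hypothetical
# placeholders, not files shipped with this script:
#
#   python convert_ldm_checkpoint.py \
#       --checkpoint_path ./ldm/model.ckpt \
#       --config_file ./ldm/config.json \
#       --dump_path ./converted_model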
import random


def _partition(data: list, pivot) -> tuple:
    """Three-way partition ``data`` into values less than, equal to, and greater than ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
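# A minimal usage sketch of quick_select; the sample list and expected values
# below are worked out by hand and are not part of the original module.
def _demo_quick_select() -> None:
    items = [2, 4, 5, 7, 899, 54, 32]
    # sorted(items) == [2, 4, 5, 7, 32, 54, 899], so index 3 is the median, 7
    assert quick_select(items, 3) == 7
    # out-of-range indices return None
    assert quick_select(items, len(items)) is None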
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
        attention_weights = ly_weight["attention"]
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowerCamelCase : Optional[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : List[str] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) ,requires_grad=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowerCamelCase : Optional[int] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowerCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
        attention_weights = ly_weight["self_attention"]
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase : str = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jnp.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")
    continuous_encoder = SpectrogramContEncoder(input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu")
    decoder = T5FilmDecoder(input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate)

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
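# A small hand-checked sketch of the helpers above: lcm(4, 6) == 12, and the
# smallest number divisible by all of 1..10 is the classic value 2520.
def _demo_lcm() -> None:
    assert greatest_common_divisor(12, 18) == 6
    assert lcm(4, 6) == 12
    assert solution(10) == 2520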
if __name__ == "__main__":
print(f'''{solution() = }''')
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode() -> dict:
    """Returns a mapping from utf-8 bytes to printable unicode strings."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
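# A short sketch of the two helpers above; the expected values are easy to
# verify by hand and are not part of the original tokenizer module.
def _demo_byte_level_helpers() -> None:
    byte_encoder = bytes_to_unicode()
    # every one of the 256 byte values maps to a distinct printable character
    assert len(byte_encoder) == 256
    assert len(set(byte_encoder.values())) == 256
    # adjacent-symbol pairs are what drives the BPE merge loop below
    assert get_pairs("hello") == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}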
class BartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text: str) -> list:
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens: list) -> str:
        """Converts a sequence of tokens (string) to a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")

    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")

    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
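# A hand-checkable sketch: with the default 30 series terms, the truncated
# Maclaurin expansions above agree with math.sin / math.cos to far better
# than 1e-7 for small angles (this check is not part of the original module).
def _demo_maclaurin() -> None:
    from math import cos, sin

    assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-7
    assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-7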
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
from __future__ import annotations


def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
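# A minimal usage sketch with a hand-built 3x3 grid (0 = free, 1 = blocked);
# the open corridor below is solvable, so solve_maze returns True. The grid
# is illustrative only and not part of the original module.
def _demo_solve_maze() -> None:
    maze = [
        [0, 1, 1],
        [0, 0, 1],
        [1, 0, 0],
    ]
    assert solve_maze(maze) is True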
if __name__ == "__main__":
import doctest
doctest.testmod()
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
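# A quick sketch of the O(len(s) * len(pattern)) scan above; the expected
# positions are worked out by hand and mirror the checks in __main__ below.
def _demo_naive_pattern_search() -> None:
    assert naive_pattern_search("AAAA", "AA") == [0, 1, 2]
    assert naive_pattern_search("ABCDEFG", "XY") == []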
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipeline_utils import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
'pipelines_utils',
'0.22.0',
'Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.',
standard_warn=False,
stacklevel=3,
)
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)
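# A hand-checked sketch of the counting functions above: C(4, 2) == 6, the
# 3rd Catalan number is 5, and 3 labeled nodes give 5 * 3! == 30 binary trees.
def _demo_tree_counts() -> None:
    assert binomial_coefficient(4, 2) == 6
    assert catalan_number(3) == 5
    assert factorial(3) == 6
    assert binary_tree_count(3) == 30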
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = int(input('Enter the number of nodes: ').strip() or 0)
if node_count <= 0:
raise ValueError('We need some nodes to work with.')
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset dict into features and target arrays.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][1]))
        cofactor_matrix[0][1] = -((d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])))
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (d(matrix[1][1]) * d(matrix[2][0]))
        cofactor_matrix[1][0] = -((d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])))
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][0]))
        cofactor_matrix[1][2] = -((d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])))
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][1]))
        cofactor_matrix[2][1] = -((d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])))
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (d(matrix[0][1]) * d(matrix[1][0]))

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
from math import sqrt


def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
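# Context sketch for the counting above (Project Euler 86): the shortest
# surface path across an a x b x c cuboid is sqrt((a + b)**2 + c**2) for the
# best unfolding, e.g. exactly 10 for the classic 6 x 5 x 3 cuboid.
def _demo_shortest_path() -> None:
    assert sqrt((5 + 3) ** 2 + 6**2) == 10.0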
if __name__ == "__main__":
print(f'''{solution() = }''')
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, pruning_method="topK", mask_init="constant", mask_scale=0.0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)


class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode, batch_size, shuffle=False) -> DataLoader:
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task", default="", type=str, required=True, help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
def method_2(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
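# A hand-checked sketch of the composite rule above: for f(x) = x**2 on [0, 1]
# with 10 steps, h = 0.1 and the rule gives 0.1 * (0.5 + 2.85) = 0.335, close
# to the exact integral 1/3 (this check is not part of the original module).
def _demo_trapezoid() -> None:
    y = method_2([0.0, 1.0], 10.0)
    assert abs(y - 0.335) < 1e-6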
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae, unet, scheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, generator=None, eta=0.0, num_inference_steps=50, output_type="pil", return_dict=True, **kwargs) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
def solution(limit: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for input1 in range(2, limit):
        counter = 0
        number = input1

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if input1 not in counters:
            counters[input1] = counter

        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
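# A hand-checked sketch of the chain counting above: starting at 13, the
# Collatz sequence 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 has
# 10 terms, the classic Project Euler 14 example.
def _demo_collatz_chain_length(start: int = 13) -> int:
    length = 1
    while start != 1:
        start = start // 2 if start % 2 == 0 else 3 * start + 1
        length += 1
    return length  # _demo_collatz_chain_length(13) == 10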
if __name__ == "__main__":
print(solution(int(input().strip())))
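# Note: this is Project Euler problem 14 (longest Collatz chain); for the
# default bound of 1_000_000 the answer is 837799.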
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
lowerCamelCase : Dict = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCamelCase : str = VideoClassificationPipeline(model=UpperCamelCase__ , image_processor=UpperCamelCase__ , top_k=2 )
lowerCamelCase : Dict = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> int:
for example in examples:
lowerCamelCase : Optional[int] = video_classifier(UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{"score": ANY(UpperCamelCase__ ), "label": ANY(UpperCamelCase__ )},
{"score": ANY(UpperCamelCase__ ), "label": ANY(UpperCamelCase__ )},
] , )
@require_torch
def _lowercase ( self ) -> Dict:
lowerCamelCase : int = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
lowerCamelCase : Any = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
lowerCamelCase : Union[str, Any] = pipeline(
"video-classification" , model=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , frame_sampling_rate=4 )
lowerCamelCase : Optional[Any] = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
lowerCamelCase : Union[str, Any] = video_classifier(UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}] , )
lowerCamelCase : List[str] = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
[{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
] , )
@require_tf
def _lowercase ( self ) -> int:
pass
import argparse
import os
import re
SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> int:
with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f:
lowerCamelCase : List[Any] = f.read()
lowerCamelCase : str = content.split("\n" )
lowerCamelCase : int = []
lowerCamelCase : List[Any] = 0
while line_idx < len(_SCREAMING_SNAKE_CASE ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowerCamelCase : Optional[int] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowerCamelCase : Optional[int] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowerCamelCase : List[str] = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowerCamelCase : Union[str, Any] = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(_SCREAMING_SNAKE_CASE ) )
elif "\n".join(_SCREAMING_SNAKE_CASE ) != content:
return True
def A ( _SCREAMING_SNAKE_CASE = False ) -> List[str]:
lowerCamelCase : str = [os.path.join(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith(".py" )]
lowerCamelCase : Union[str, Any] = [sort_auto_mapping(_SCREAMING_SNAKE_CASE ,overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames]
if not overwrite and any(_SCREAMING_SNAKE_CASE ):
lowerCamelCase : str = [f for f, d in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix'''
" this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
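# Usage note (the script path is an assumption): run from the repository root,
#   python utils/sort_auto_mappings.py              # rewrite mappings in place
#   python utils/sort_auto_mappings.py --check_only # raise if any mapping is unsorted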
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
SCREAMING_SNAKE_CASE__ : Tuple = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """albert"""
def __init__( self , UpperCamelCase__=3_0000 , UpperCamelCase__=128 , UpperCamelCase__=4096 , UpperCamelCase__=12 , UpperCamelCase__=1 , UpperCamelCase__=64 , UpperCamelCase__=1_6384 , UpperCamelCase__=1 , UpperCamelCase__="gelu_new" , UpperCamelCase__=0 , UpperCamelCase__=0 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0.1 , UpperCamelCase__="absolute" , UpperCamelCase__=0 , UpperCamelCase__=2 , UpperCamelCase__=3 , **UpperCamelCase__ , ) -> Dict:
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Dict = vocab_size
lowerCamelCase : Union[str, Any] = embedding_size
lowerCamelCase : Optional[Any] = hidden_size
lowerCamelCase : Tuple = num_hidden_layers
lowerCamelCase : str = num_hidden_groups
lowerCamelCase : str = num_attention_heads
lowerCamelCase : str = inner_group_num
lowerCamelCase : Any = hidden_act
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : Optional[Any] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : Union[str, Any] = type_vocab_size
lowerCamelCase : int = initializer_range
lowerCamelCase : int = layer_norm_eps
lowerCamelCase : List[Any] = classifier_dropout_prob
lowerCamelCase : Dict = position_embedding_type
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCamelCase : Optional[Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
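# For the default (non multiple-choice) task, each of input_ids, attention_mask
# and token_type_ids above gets the dynamic axes {0: "batch", 1: "sequence"}.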
def A ( _SCREAMING_SNAKE_CASE ) -> list:
if n_term == "":
return []
lowerCamelCase : list = []
for temp in range(int(_SCREAMING_SNAKE_CASE ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
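# Example: entering 5 prints ['1', '1/2', '1/3', '1/4', '1/5'].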
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCamelCase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase , lowerCamelCase : Tuple = XLMProphetNetForConditionalGeneration.from_pretrained(
_SCREAMING_SNAKE_CASE ,output_loading_info=_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase : Dict = ProphetNetForConditionalGenerationOld.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase , lowerCamelCase : Dict = ProphetNetForConditionalGeneration.from_pretrained(
_SCREAMING_SNAKE_CASE ,output_loading_info=_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = ["key_proj", "value_proj", "query_proj"]
lowerCamelCase : Union[str, Any] = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowerCamelCase : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowerCamelCase : Union[str, Any] = prophet
lowerCamelCase : List[str] = prophet_old
else:
lowerCamelCase : Optional[Any] = prophet.prophetnet
lowerCamelCase : Dict = prophet_old.model
lowerCamelCase : Union[str, Any] = False
for attribute in attributes:
if attribute in mapping:
lowerCamelCase : Any = mapping[attribute]
if not hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) > 0:
lowerCamelCase : Optional[Any] = attribute
elif hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
lowerCamelCase : List[str] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCamelCase : Tuple = old_model.weight
logger.info(f'''{attribute} is initialized.''' )
lowerCamelCase : Optional[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCamelCase : str = old_model.bias
logger.info(f'''{attribute} is initialized''' )
lowerCamelCase : List[str] = True
break
elif attribute in special_keys and hasattr(_SCREAMING_SNAKE_CASE ,"in_proj_weight" ):
lowerCamelCase : Union[str, Any] = old_model.in_proj_weight.shape[0] // 3
lowerCamelCase : Dict = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCamelCase : Any = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowerCamelCase : List[str] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowerCamelCase : int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowerCamelCase : Any = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowerCamelCase : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowerCamelCase : str = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowerCamelCase : Union[str, Any] = True
break
if attribute.isdigit():
lowerCamelCase : List[str] = model[int(_SCREAMING_SNAKE_CASE )]
lowerCamelCase : Optional[Any] = old_model[int(_SCREAMING_SNAKE_CASE )]
else:
lowerCamelCase : Optional[Any] = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if old_attribute == "":
lowerCamelCase : str = old_model
else:
if not hasattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
raise ValueError(f'''{old_model} does not have {old_attribute}''' )
lowerCamelCase : List[str] = getattr(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if not is_key_init:
raise ValueError(f'''{key} was not correctly initialized!''' )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
prophet.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
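# Example invocation (the script file name and checkpoint id are assumptions;
# the comment near the imports only pins down the `patrickvonplaten/..._old` pattern):
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path patrickvonplaten/prophetnet-large-uncased_old \
#       --pytorch_dump_folder_path ./prophetnet-large-uncased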
from __future__ import annotations
import requests
def A ( _SCREAMING_SNAKE_CASE ) -> dict:
lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_SCREAMING_SNAKE_CASE ).json()
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]:
lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories]
return [get_hackernews_story(_SCREAMING_SNAKE_CASE ) for story_id in story_ids]
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str:
lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE )
return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
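# Each story renders as one markdown bullet, e.g.
#   * [Story title](https://news.example.com/story)
# so the default call prints the current top ten stories.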
def A ( _SCREAMING_SNAKE_CASE = 50 ) -> int:
lowerCamelCase : str = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 ,5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
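# Note: this is Project Euler problem 117 (tiling a 50-unit row with tiles of
# length 2, 3 and 4); solution() evaluates to 100808458960497.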
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model"""
def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]:
super().__init__(**UpperCamelCase__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Dict = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Union[str, Any] = attention_dropout
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : str = qkv_bias
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = """blip_2_qformer"""
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int:
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : int = hidden_act
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Dict = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : List[str] = initializer_range
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = position_embedding_type
lowerCamelCase : Tuple = cross_attention_frequency
lowerCamelCase : Optional[int] = encoder_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """blip-2"""
lowerCamelCase_ : int = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str:
super().__init__(**UpperCamelCase__ )
if vision_config is None:
lowerCamelCase : List[Any] = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
lowerCamelCase : List[Any] = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
lowerCamelCase : Any = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ )
lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ )
lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings
lowerCamelCase : int = self.text_config.is_encoder_decoder
lowerCamelCase : Optional[Any] = num_query_tokens
lowerCamelCase : int = self.vision_config.hidden_size
lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase : Dict = 1.0
lowerCamelCase : List[Any] = 0.02
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase : Tuple = self.vision_config.to_dict()
lowerCamelCase : int = self.qformer_config.to_dict()
lowerCamelCase : Optional[Any] = self.text_config.to_dict()
lowerCamelCase : int = self.__class__.model_type
return output
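# Composition note: with no arguments, the composite config above builds default
# vision and q-former sub-configs, falls back to an OPT text config, and (in the
# upstream implementation) wires the vision hidden size into the q-former's
# encoder_hidden_size so cross-attention widths line up.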
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> list:
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowerCamelCase : Dict = True
for j in range(_SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowerCamelCase : Optional[int] = False
break
if match_found:
position.append(_SCREAMING_SNAKE_CASE )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
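# The call above prints [4, 10, 18], i.e. every index where 'ABC' begins in the
# text; the naive scan costs O((n - m + 1) * m) character comparisons.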
import random
from .binary_exp_mod import bin_exp_mod
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1000 ) -> List[str]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCamelCase : List[Any] = n - 1
lowerCamelCase : Dict = 0
while d % 2 == 0:
d //= 2  # floor-divide so d stays an integer for bin_exp_mod
exp += 1
# n - 1 = d * (2 ** exp)
lowerCamelCase : Optional[Any] = 0
while count < prec:
lowerCamelCase : str = random.randint(2 ,n - 1 )
lowerCamelCase : Dict = bin_exp_mod(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if b != 1:
lowerCamelCase : str = True
for _ in range(_SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCamelCase : Tuple = False
break
lowerCamelCase : int = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
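# Note: this is the Miller-Rabin probabilistic primality test; each of the
# `prec` rounds draws a fresh random base, and a composite survives any single
# round with probability at most 1/4.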
import os
import pytest
from transformers.dynamic_module_utils import get_imports
SCREAMING_SNAKE_CASE__ : str = '\nimport os\n'
SCREAMING_SNAKE_CASE__ : str = '\ndef foo():\n import os\n return False\n'
SCREAMING_SNAKE_CASE__ : Any = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
SCREAMING_SNAKE_CASE__ : Optional[int] = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
SCREAMING_SNAKE_CASE__ : List[str] = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
SCREAMING_SNAKE_CASE__ : Any = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
SCREAMING_SNAKE_CASE__ : str = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
SCREAMING_SNAKE_CASE__ : Optional[Any] = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
SCREAMING_SNAKE_CASE__ : Tuple = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case" ,_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowerCamelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE ,"test_file.py" )
with open(_SCREAMING_SNAKE_CASE ,"w" ) as _tmp_file:
_tmp_file.write(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = get_imports(_SCREAMING_SNAKE_CASE )
assert parsed_imports == ["os"]
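# The parametrized cases above nest `import os` in functions and try/except
# blocks of varying shape; the assertion checks that get_imports ignores the
# guarded `bar`/`baz` imports and reports only "os" every time.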
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
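# Layout note: unlike BERT-style tokenizers, XLNet appends its special tokens,
# so build_inputs_with_special_tokens above yields `tokens + <sep> + <cls>` for
# one sequence and `tokens_a + <sep> + tokens_b + <sep> + <cls>` for a pair.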
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = ["""image_processor""", """tokenizer"""]
lowerCamelCase_ : Tuple = """BridgeTowerImageProcessor"""
lowerCamelCase_ : List[str] = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> str:
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> BatchEncoding:
lowerCamelCase : Any = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
lowerCamelCase : Dict = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _lowercase ( self ) -> str:
lowerCamelCase : str = self.tokenizer.model_input_names
lowerCamelCase : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
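# Usage sketch (the checkpoint name is an assumption):
#   processor = UpperCamelCase__.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
# `inputs` then carries the tokenizer fields plus pixel_values and pixel_mask.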
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
SCREAMING_SNAKE_CASE__ : Any = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : int = EfficientNetConfig()
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"]
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"]
lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"]
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"]
lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"]
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : Any = 1000
lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
return config
def A ( ) -> int:
lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : str = EfficientNetImageProcessor(
size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,)
return preprocessor
def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )}
lowerCamelCase : List[Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowerCamelCase : Optional[int] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowerCamelCase : List[str] = "efficientnet." + item[1]
lowerCamelCase : int = "classifier.weight"
lowerCamelCase : Union[str, Any] = "classifier.bias"
return key_mapping
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowerCamelCase : Tuple = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowerCamelCase : Optional[int] = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,)
lowerCamelCase : List[Any] = original_model.trainable_variables
lowerCamelCase : Tuple = original_model.non_trainable_variables
lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowerCamelCase : List[str] = param.numpy()
lowerCamelCase : int = list(tf_params.keys() )
# Load HuggingFace model
lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowerCamelCase : Tuple = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = outputs.logits.detach().numpy()
# Original model inference
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 )
lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
lowerCamelCase : int = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
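# Example invocation (the script file name is an assumption):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model
# Add --push_to_hub to also upload the converted model and image processor.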
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : str = jnp.ones((batch_size, length) ) / length
return scores
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = 20
lowerCamelCase : Dict = self._get_uniform_logits(batch_size=2 , length=UpperCamelCase__ )
# tweak scores to not be uniform anymore
lowerCamelCase : Union[str, Any] = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
lowerCamelCase : int = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
lowerCamelCase : Any = jax.nn.softmax(UpperCamelCase__ , axis=-1 )
lowerCamelCase : Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : List[Any] = FlaxTemperatureLogitsWarper(temperature=1.3 )
lowerCamelCase : Dict = jax.nn.softmax(temp_dist_warper_sharper(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
lowerCamelCase : Any = jax.nn.softmax(temp_dist_warper_smoother(UpperCamelCase__ , scores.copy() , cur_len=UpperCamelCase__ ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def _lowercase ( self ) -> Any:
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = 10
lowerCamelCase : Dict = 2
# create ramp distribution
lowerCamelCase : str = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy()
lowerCamelCase : str = ramp_logits[1:, : vocab_size // 2] + vocab_size
lowerCamelCase : Dict = FlaxTopKLogitsWarper(3 )
lowerCamelCase : str = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
lowerCamelCase : int = 5
lowerCamelCase : List[Any] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
lowerCamelCase : int = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, length) ).copy()
lowerCamelCase : List[str] = top_k_warp_safety_check(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : int = 10
lowerCamelCase : Optional[Any] = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
lowerCamelCase : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
lowerCamelCase : Any = FlaxTopPLogitsWarper(0.8 )
lowerCamelCase : Optional[int] = np.exp(top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
lowerCamelCase : str = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# check edge cases with negative and extreme logits
lowerCamelCase : Optional[int] = np.broadcast_to(np.arange(UpperCamelCase__ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
lowerCamelCase : int = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
lowerCamelCase : Union[str, Any] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
lowerCamelCase : Optional[int] = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : List[str] = 20
lowerCamelCase : List[str] = 4
lowerCamelCase : List[str] = 0
lowerCamelCase : Dict = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
# check that min length is applied at length 5
lowerCamelCase : Dict = ids_tensor((batch_size, 20) , vocab_size=20 )
lowerCamelCase : Tuple = 5
lowerCamelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[int] = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
lowerCamelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = 15
lowerCamelCase : List[Any] = min_dist_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> str:
lowerCamelCase : List[str] = 20
lowerCamelCase : List[Any] = 4
lowerCamelCase : str = 0
lowerCamelCase : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the bos_token_id score
lowerCamelCase : Any = ids_tensor((batch_size, 1) , vocab_size=20 )
lowerCamelCase : str = 1
lowerCamelCase : Tuple = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : int = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
lowerCamelCase : List[str] = 3
lowerCamelCase : Optional[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Dict = 20
lowerCamelCase : Optional[int] = 4
lowerCamelCase : int = 0
lowerCamelCase : Optional[int] = 5
lowerCamelCase : int = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
# check that all scores are -inf except the eos_token_id when max_length is reached
lowerCamelCase : Tuple = ids_tensor((batch_size, 4) , vocab_size=20 )
lowerCamelCase : Optional[int] = 4
lowerCamelCase : int = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
lowerCamelCase : int = 3
lowerCamelCase : List[str] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Dict = logits_processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
self.assertFalse(jnp.isinf(UpperCamelCase__ ).any() )
def _lowercase ( self ) -> Any:
lowerCamelCase : List[str] = 4
lowerCamelCase : Union[str, Any] = 10
lowerCamelCase : Dict = 15
lowerCamelCase : int = 2
lowerCamelCase : List[str] = 1
lowerCamelCase : List[str] = 15
# dummy input_ids and scores
lowerCamelCase : Dict = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
lowerCamelCase : Tuple = input_ids.copy()
lowerCamelCase : Any = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = scores.copy()
# instantiate all dist processors
lowerCamelCase : Dict = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : Dict = FlaxTopKLogitsWarper(3 )
lowerCamelCase : Tuple = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase : List[Any] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = 10
# no processor list
lowerCamelCase : Any = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Tuple = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Dict = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : List[str] = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[int] = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Dict = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# with processor list
lowerCamelCase : List[Any] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase : int = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : List[str] = 4
lowerCamelCase : int = 10
lowerCamelCase : List[str] = 15
lowerCamelCase : Optional[Any] = 2
lowerCamelCase : Optional[int] = 1
lowerCamelCase : Any = 15
# dummy input_ids and scores
lowerCamelCase : Dict = ids_tensor((batch_size, sequence_length) , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = input_ids.copy()
lowerCamelCase : List[Any] = self._get_uniform_logits(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : str = scores.copy()
# instantiate all dist processors
lowerCamelCase : Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5 )
lowerCamelCase : Tuple = FlaxTopKLogitsWarper(3 )
lowerCamelCase : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
lowerCamelCase : List[str] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=UpperCamelCase__ )
lowerCamelCase : int = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = 10
# no processor list
def run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Tuple = temp_dist_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = top_k_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = top_p_warp(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Dict = min_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : List[str] = bos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
lowerCamelCase : Tuple = eos_dist_proc(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
# with processor list
def run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Optional[int] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
lowerCamelCase : Optional[Any] = processor(UpperCamelCase__ , UpperCamelCase__ , cur_len=UpperCamelCase__ )
return scores
lowerCamelCase : List[Any] = jax.jit(UpperCamelCase__ )
lowerCamelCase : Optional[int] = jax.jit(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = jitted_run_no_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Dict = jitted_run_processor_list(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# scores should be equal
self.assertTrue(jnp.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
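# Consolidate a pretrained question encoder and generator into a single RAG checkpoint, then save the model and both tokenizers.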
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]:
if config_name_or_path is None:
lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
lowerCamelCase : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase : Any = question_encoder_name_or_path
lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = gen_config
lowerCamelCase : Optional[Any] = question_encoder_config
lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizers.
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Dict = SwinConfig(image_size=192 )
if "base" in model_name:
lowerCamelCase : int = 6
lowerCamelCase : str = 128
lowerCamelCase : int = (2, 2, 18, 2)
lowerCamelCase : Optional[int] = (4, 8, 16, 32)
elif "large" in model_name:
lowerCamelCase : Union[str, Any] = 12
lowerCamelCase : List[Any] = 192
lowerCamelCase : int = (2, 2, 18, 2)
lowerCamelCase : Optional[int] = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
lowerCamelCase : List[Any] = window_size
lowerCamelCase : List[Any] = embed_dim
lowerCamelCase : Tuple = depths
lowerCamelCase : Any = num_heads
return config
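# Map the original SimMIM checkpoint key names onto the Hugging Face Swin naming scheme.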
def A ( _SCREAMING_SNAKE_CASE ) -> str:
if "encoder.mask_token" in name:
lowerCamelCase : Optional[Any] = name.replace("encoder.mask_token" ,"embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
lowerCamelCase : Optional[int] = name.replace("encoder.patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
lowerCamelCase : int = name.replace("encoder.patch_embed.norm" ,"embeddings.norm" )
if "attn.proj" in name:
lowerCamelCase : Dict = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name:
lowerCamelCase : Tuple = name.replace("attn" ,"attention.self" )
if "norm1" in name:
lowerCamelCase : Any = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
lowerCamelCase : List[str] = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase : int = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase : Optional[Any] = name.replace("mlp.fc2" ,"output.dense" )
if name == "encoder.norm.weight":
lowerCamelCase : Optional[int] = "layernorm.weight"
if name == "encoder.norm.bias":
lowerCamelCase : Any = "layernorm.bias"
if "decoder" in name:
pass
else:
lowerCamelCase : Optional[int] = "swin." + name
return name
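# Rewrite every tensor in the checkpoint, splitting the fused qkv projections into separate query/key/value entries.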
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[Any] = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowerCamelCase : Optional[Any] = key.split("." )
lowerCamelCase : Union[str, Any] = int(key_split[2] )
lowerCamelCase : Any = int(key_split[4] )
lowerCamelCase : Any = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase : List[str] = val[:dim, :]
lowerCamelCase : List[str] = val[
dim : dim * 2, :
]
lowerCamelCase : List[str] = val[-dim:, :]
else:
lowerCamelCase : Tuple = val[
:dim
]
lowerCamelCase : List[Any] = val[
dim : dim * 2
]
lowerCamelCase : Dict = val[
-dim:
]
else:
lowerCamelCase : List[str] = val
return orig_state_dict
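# End-to-end conversion: load the SimMIM checkpoint, remap its keys, and sanity-check a forward pass on a COCO image.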
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Dict = torch.load(_SCREAMING_SNAKE_CASE ,map_location="cpu" )["model"]
lowerCamelCase : Dict = get_swin_config(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = SwinForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCamelCase : Optional[int] = convert_state_dict(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = ViTImageProcessor(size={"height": 192, "width": 192} )
lowerCamelCase : Optional[Any] = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
lowerCamelCase : Tuple = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors="pt" )
with torch.no_grad():
lowerCamelCase : List[str] = model(**_SCREAMING_SNAKE_CASE ).logits
print(outputs.shape )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
import math
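# For an apparent power S and power factor pf: real power P = S * pf, reactive power Q = S * sqrt(1 - pf**2).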
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float:
if (
not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> float:
if (
not isinstance(_SCREAMING_SNAKE_CASE ,(int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import requests
def A ( _SCREAMING_SNAKE_CASE ) -> dict:
lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_SCREAMING_SNAKE_CASE ).json()
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]:
lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories]
return [get_hackernews_story(story_id ) for story_id in story_ids]
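# Render the fetched stories as a markdown bullet list of [title](url) links.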
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str:
lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE )
return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
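# timm stores attention as a single fused qkv matrix; split it into the separate query/key/value tensors DeiT expects.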
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[str] = dct.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = val
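# Load the standard COCO test image used to verify the converted model's outputs.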
def A ( ) -> List[str]:
lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = ["""image_processor""", """tokenizer"""]
lowerCamelCase_ : Optional[Any] = """FlavaImageProcessor"""
lowerCamelCase_ : Optional[Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCamelCase__ , )
lowerCamelCase : Dict = kwargs.pop("feature_extractor" )
lowerCamelCase : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : List[Any] = self.image_processor
def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> Optional[int]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowerCamelCase : Union[str, Any] = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
if images is not None:
lowerCamelCase : str = self.image_processor(
UpperCamelCase__ , return_image_mask=UpperCamelCase__ , return_codebook_pixels=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
if text is not None and images is not None:
encoding.update(UpperCamelCase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _lowercase ( self ) -> str:
lowerCamelCase : Tuple = self.tokenizer.model_input_names
lowerCamelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowercase ( self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase__ , )
return self.image_processor_class
@property
def _lowercase ( self ) -> Any:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase__ , )
return self.image_processor
import random
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple:
lowerCamelCase , lowerCamelCase , lowerCamelCase = [], [], []
for element in data:
if element < pivot:
less.append(_SCREAMING_SNAKE_CASE )
elif element > pivot:
greater.append(_SCREAMING_SNAKE_CASE )
else:
equal.append(_SCREAMING_SNAKE_CASE )
return less, equal, greater
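# Quickselect: return the element that would sit at position `index` in sorted order, in expected O(n) time.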
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
# index = len(items) // 2 when trying to find the median
# (value of index when items is sorted)
# invalid input
if index >= len(items ) or index < 0:
return None
lowerCamelCase : List[Any] = items[random.randint(0 ,len(items ) - 1 )]
lowerCamelCase : Dict = 0
lowerCamelCase , lowerCamelCase , lowerCamelCase = _partition(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = len(_SCREAMING_SNAKE_CASE )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# must be in larger
else:
return quick_select(_SCREAMING_SNAKE_CASE ,index - (m + count) )
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : int = False
def A ( _SCREAMING_SNAKE_CASE ) -> Tuple:
return TrainCommand(_SCREAMING_SNAKE_CASE )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
@staticmethod
def _lowercase ( UpperCamelCase__ ) -> Union[str, Any]:
lowerCamelCase : int = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=UpperCamelCase__ , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=UpperCamelCase__ , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=UpperCamelCase__ , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=UpperCamelCase__ , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=UpperCamelCase__ , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=UpperCamelCase__ , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=UpperCamelCase__ , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=UpperCamelCase__ , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=UpperCamelCase__ , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=UpperCamelCase__ , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=UpperCamelCase__ , default=3e-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=UpperCamelCase__ , default=1e-08 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=UpperCamelCase__ )
def __init__( self , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : List[Any] = logging.get_logger("transformers-cli/training" )
lowerCamelCase : str = "tf" if is_tf_available() else "torch"
os.makedirs(args.output , exist_ok=UpperCamelCase__ )
lowerCamelCase : Any = args.output
lowerCamelCase : List[Any] = args.column_label
lowerCamelCase : List[Any] = args.column_text
lowerCamelCase : List[Any] = args.column_id
self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
if args.task == "text_classification":
lowerCamelCase : Tuple = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F'''Loading dataset from {args.train_data}''' )
lowerCamelCase : Dict = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCamelCase : Optional[Any] = None
if args.validation_data:
self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
lowerCamelCase : Dict = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
lowerCamelCase : Optional[int] = args.validation_split
lowerCamelCase : Dict = args.train_batch_size
lowerCamelCase : Union[str, Any] = args.valid_batch_size
lowerCamelCase : Any = args.learning_rate
lowerCamelCase : Optional[int] = args.adam_epsilon
def _lowercase ( self ) -> Any:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def _lowercase ( self ) -> Any:
raise NotImplementedError
def _lowercase ( self ) -> Any:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
return x if y == 0 else greatest_common_divisor(_SCREAMING_SNAKE_CASE ,x % y )
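# lcm follows from the identity x * y == gcd(x, y) * lcm(x, y).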
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
return (x * y) // greatest_common_divisor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
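# Project Euler 5: smallest positive number evenly divisible by every integer from 1 to n, built up as a running lcm.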
def A ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
lowerCamelCase : List[Any] = 1
for i in range(1 ,n + 1 ):
lowerCamelCase : List[str] = lcm(g ,i )
return g
if __name__ == "__main__":
print(f'''{solution() = }''')
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
SCREAMING_SNAKE_CASE__ : Dict = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
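# Thin wrapper around torch.onnx.export that papers over the argument differences before and after torch 1.11.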
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ,) -> List[Any]:
output_path.parent.mkdir(parents=_SCREAMING_SNAKE_CASE ,exist_ok=_SCREAMING_SNAKE_CASE )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,f=output_path.as_posix() ,input_names=_SCREAMING_SNAKE_CASE ,output_names=_SCREAMING_SNAKE_CASE ,dynamic_axes=_SCREAMING_SNAKE_CASE ,do_constant_folding=_SCREAMING_SNAKE_CASE ,use_external_data_format=_SCREAMING_SNAKE_CASE ,enable_onnx_checker=_SCREAMING_SNAKE_CASE ,opset_version=_SCREAMING_SNAKE_CASE ,)
else:
export(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,f=output_path.as_posix() ,input_names=_SCREAMING_SNAKE_CASE ,output_names=_SCREAMING_SNAKE_CASE ,dynamic_axes=_SCREAMING_SNAKE_CASE ,do_constant_folding=_SCREAMING_SNAKE_CASE ,opset_version=_SCREAMING_SNAKE_CASE ,)
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ) -> List[Any]:
lowerCamelCase : Union[str, Any] = torch.float16 if fpaa else torch.float32
if fpaa and torch.cuda.is_available():
lowerCamelCase : Union[str, Any] = "cuda"
elif fpaa and not torch.cuda.is_available():
raise ValueError("`float16` model export is only supported on GPUs with CUDA" )
else:
lowerCamelCase : Dict = "cpu"
lowerCamelCase : int = StableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE ,torch_dtype=_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Tuple = Path(_SCREAMING_SNAKE_CASE )
# TEXT ENCODER
lowerCamelCase : Dict = pipeline.text_encoder.config.max_position_embeddings
lowerCamelCase : Dict = pipeline.text_encoder.config.hidden_size
lowerCamelCase : Dict = pipeline.tokenizer(
"A sample prompt" ,padding="max_length" ,max_length=pipeline.tokenizer.model_max_length ,truncation=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ,)
onnx_export(
pipeline.text_encoder ,model_args=(text_input.input_ids.to(device=_SCREAMING_SNAKE_CASE ,dtype=torch.int32 )) ,output_path=output_path / "text_encoder" / "model.onnx" ,ordered_input_names=["input_ids"] ,output_names=["last_hidden_state", "pooler_output"] ,dynamic_axes={
"input_ids": {0: "batch", 1: "sequence"},
} ,opset=_SCREAMING_SNAKE_CASE ,)
del pipeline.text_encoder
# UNET
lowerCamelCase : int = pipeline.unet.config.in_channels
lowerCamelCase : Any = pipeline.unet.config.sample_size
lowerCamelCase : List[str] = output_path / "unet" / "model.onnx"
onnx_export(
pipeline.unet ,model_args=(
torch.randn(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ),
torch.randn(2 ).to(device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ),
torch.randn(2 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ),
False,
) ,output_path=_SCREAMING_SNAKE_CASE ,ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"] ,output_names=["out_sample"] ,dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"timestep": {0: "batch"},
"encoder_hidden_states": {0: "batch", 1: "sequence"},
} ,opset=_SCREAMING_SNAKE_CASE ,use_external_data_format=_SCREAMING_SNAKE_CASE ,)
lowerCamelCase : List[str] = str(unet_path.absolute().as_posix() )
lowerCamelCase : Optional[int] = os.path.dirname(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = onnx.load(_SCREAMING_SNAKE_CASE )
# clean up existing tensor files
shutil.rmtree(_SCREAMING_SNAKE_CASE )
os.mkdir(_SCREAMING_SNAKE_CASE )
# collate external tensor files into one
onnx.save_model(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,save_as_external_data=_SCREAMING_SNAKE_CASE ,all_tensors_to_one_file=_SCREAMING_SNAKE_CASE ,location="weights.pb" ,convert_attribute=_SCREAMING_SNAKE_CASE ,)
del pipeline.unet
# VAE ENCODER
lowerCamelCase : int = pipeline.vae
lowerCamelCase : Optional[Any] = vae_encoder.config.in_channels
lowerCamelCase : int = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
lowerCamelCase : str = lambda sample ,return_dict : vae_encoder.encode(sample ,return_dict )[0].sample()
onnx_export(
_SCREAMING_SNAKE_CASE ,model_args=(
torch.randn(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ),
False,
) ,output_path=output_path / "vae_encoder" / "model.onnx" ,ordered_input_names=["sample", "return_dict"] ,output_names=["latent_sample"] ,dynamic_axes={
"sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} ,opset=_SCREAMING_SNAKE_CASE ,)
# VAE DECODER
lowerCamelCase : int = pipeline.vae
lowerCamelCase : Optional[int] = vae_decoder.config.latent_channels
lowerCamelCase : str = vae_decoder.config.out_channels
# forward only through the decoder part
lowerCamelCase : str = vae_encoder.decode
onnx_export(
_SCREAMING_SNAKE_CASE ,model_args=(
torch.randn(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ),
False,
) ,output_path=output_path / "vae_decoder" / "model.onnx" ,ordered_input_names=["latent_sample", "return_dict"] ,output_names=["sample"] ,dynamic_axes={
"latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
} ,opset=_SCREAMING_SNAKE_CASE ,)
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowerCamelCase : int = pipeline.safety_checker
lowerCamelCase : str = safety_checker.config.vision_config.num_channels
lowerCamelCase : Tuple = safety_checker.config.vision_config.image_size
lowerCamelCase : int = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker ,model_args=(
torch.randn(
1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,).to(device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ),
torch.randn(1 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE ,dtype=_SCREAMING_SNAKE_CASE ),
) ,output_path=output_path / "safety_checker" / "model.onnx" ,ordered_input_names=["clip_input", "images"] ,output_names=["out_images", "has_nsfw_concepts"] ,dynamic_axes={
"clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
"images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
} ,opset=_SCREAMING_SNAKE_CASE ,)
del pipeline.safety_checker
lowerCamelCase : List[Any] = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker" )
lowerCamelCase : Optional[Any] = pipeline.feature_extractor
else:
lowerCamelCase : List[Any] = None
lowerCamelCase : Optional[int] = None
lowerCamelCase : str = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder" ) ,vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder" ) ,text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder" ) ,tokenizer=pipeline.tokenizer ,unet=OnnxRuntimeModel.from_pretrained(output_path / "unet" ) ,scheduler=pipeline.scheduler ,safety_checker=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,requires_safety_checker=safety_checker is not None ,)
onnx_pipeline.save_pretrained(_SCREAMING_SNAKE_CASE )
print("ONNX pipeline saved to" ,_SCREAMING_SNAKE_CASE )
del pipeline
del onnx_pipeline
lowerCamelCase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(_SCREAMING_SNAKE_CASE ,provider="CPUExecutionProvider" )
print("ONNX pipeline is loadable" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fp16)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" )
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> int:
lowerCamelCase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=UpperCamelCase__ )
@slow
def _lowercase ( self ) -> List[Any]:
self.resolver.convert_models(["heb-eng"] )
@slow
def _lowercase ( self ) -> Tuple:
lowerCamelCase , lowerCamelCase = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ )
assert mmeta["long_pair"] == "heb-eng"
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self ) -> Any:
lowerCamelCase : Optional[int] = []
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
self.events.append("on_init_end" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
self.events.append("on_train_begin" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
self.events.append("on_train_end" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> int:
self.events.append("on_epoch_begin" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
self.events.append("on_epoch_end" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
self.events.append("on_step_begin" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
self.events.append("on_step_end" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
self.events.append("on_evaluate" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
self.events.append("on_predict" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
self.events.append("on_save" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> str:
self.events.append("on_log" )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> Union[str, Any]:
self.events.append("on_prediction_step" )
@require_torch
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = tempfile.mkdtemp()
def _lowercase ( self ) -> str:
shutil.rmtree(self.output_dir )
def _lowercase ( self , UpperCamelCase__=0 , UpperCamelCase__=0 , UpperCamelCase__=64 , UpperCamelCase__=64 , UpperCamelCase__=None , UpperCamelCase__=False , **UpperCamelCase__ ) -> Optional[Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# it's set to False since the tests later on depend on its value.
lowerCamelCase : Dict = RegressionDataset(length=UpperCamelCase__ )
lowerCamelCase : str = RegressionDataset(length=UpperCamelCase__ )
lowerCamelCase : List[str] = RegressionModelConfig(a=UpperCamelCase__ , b=UpperCamelCase__ )
lowerCamelCase : List[Any] = RegressionPreTrainedModel(UpperCamelCase__ )
lowerCamelCase : Optional[int] = TrainingArguments(self.output_dir , disable_tqdm=UpperCamelCase__ , report_to=[] , **UpperCamelCase__ )
return Trainer(
UpperCamelCase__ , UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , callbacks=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
# Order doesn't matter
lowerCamelCase : Dict = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : cb.__name__ if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cb.__class__.__name__ )
lowerCamelCase : List[str] = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : cb.__name__ if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cb.__class__.__name__ )
for cba, cba in zip(UpperCamelCase__ , UpperCamelCase__ ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(UpperCamelCase__ , cba.__class__ )
elif not isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(cba.__class__ , UpperCamelCase__ )
else:
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : Tuple = ["on_init_end", "on_train_begin"]
lowerCamelCase : Union[str, Any] = 0
lowerCamelCase : Dict = len(trainer.get_eval_dataloader() )
lowerCamelCase : Union[str, Any] = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("on_epoch_begin" )
for _ in range(UpperCamelCase__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _lowercase ( self ) -> List[str]:
lowerCamelCase : List[str] = self.get_trainer()
lowerCamelCase : str = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
# Callbacks passed at init are added to the default callbacks
lowerCamelCase : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(UpperCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
lowerCamelCase : str = self.get_trainer(disable_tqdm=UpperCamelCase__ )
lowerCamelCase : List[str] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
lowerCamelCase : Optional[int] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(UpperCamelCase__ )
expected_callbacks.remove(UpperCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
lowerCamelCase : str = self.get_trainer()
lowerCamelCase : Tuple = trainer.pop_callback(UpperCamelCase__ )
self.assertEqual(cb.__class__ , UpperCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
trainer.add_callback(UpperCamelCase__ )
expected_callbacks.insert(0 , UpperCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
# We can also add, pop, or remove by instance
lowerCamelCase : Optional[Any] = self.get_trainer()
lowerCamelCase : Optional[int] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(UpperCamelCase__ )
expected_callbacks.remove(UpperCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
lowerCamelCase : Tuple = self.get_trainer()
lowerCamelCase : Tuple = trainer.callback_handler.callbacks[0]
lowerCamelCase : List[str] = trainer.pop_callback(UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
trainer.add_callback(UpperCamelCase__ )
expected_callbacks.insert(0 , UpperCamelCase__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , UpperCamelCase__ )
def _lowercase ( self ) -> List[str]:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=UpperCamelCase__ )
lowerCamelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
lowerCamelCase : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) )
# Independent log/save/eval
lowerCamelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
lowerCamelCase : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) )
lowerCamelCase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
lowerCamelCase : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) )
lowerCamelCase : Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
lowerCamelCase : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) )
lowerCamelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
lowerCamelCase : List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) )
# A bit of everything
lowerCamelCase : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
lowerCamelCase : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(UpperCamelCase__ , self.get_expected_events(UpperCamelCase__ ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
lowerCamelCase : Optional[int] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(UpperCamelCase__ ) in warn_mock.call_args[0][0]
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
# Initialise PyTorch model
lowerCamelCase : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : str = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from __future__ import annotations
import typing
from collections import Counter
def A ( _SCREAMING_SNAKE_CASE ) -> typing.Counter[int]:
lowerCamelCase : typing.Counter[int] = Counter()
for base in range(1 ,max_perimeter + 1 ):
for perpendicular in range(base ,max_perimeter + 1 ):
lowerCamelCase : Tuple = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(hypotenuse ):
lowerCamelCase : Tuple = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
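# Project Euler 39: find the perimeter p <= 1000 that admits the most integer-sided right triangles.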
def A ( _SCREAMING_SNAKE_CASE = 1000 ) -> int:
lowerCamelCase : Dict = pythagorean_triple(_SCREAMING_SNAKE_CASE )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f'''Perimeter {solution()} has maximum solutions''')
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']}
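# Tokenizer classes are only exposed when their optional backends (sentencepiece / tokenizers) are installed.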
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the sequence length equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = num_frames * self.num_patches_per_frame + 1
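        # e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame, so 2 * 25 + 1 = 51 tokens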
    def prepare_config_and_inputs(self) -> tuple:
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self) -> TimesformerConfig:
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels) -> None:
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels) -> None:
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self) -> tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self) -> None:
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
def _lowercase ( self ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _lowercase ( self ) -> Dict:
pass
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : List[str] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : str = model_class(UpperCamelCase__ )
lowerCamelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Union[str, Any] = [*signature.parameters.keys()]
lowerCamelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self ) -> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*UpperCamelCase__ )
@slow
def _lowercase ( self ) -> List[Any]:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Optional[Any] = TimesformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _lowercase ( self ) -> Dict:
if not self.has_attentions:
pass
else:
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : List[str] = True
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = self.model_tester.seq_length
lowerCamelCase : List[str] = self.model_tester.num_frames
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Tuple = False
lowerCamelCase : Dict = True
lowerCamelCase : Optional[Any] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase : List[Any] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase : Optional[int] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase : int = True
lowerCamelCase : Dict = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowerCamelCase : Union[str, Any] = len(UpperCamelCase__ )
# Check attention is always last and order is fine
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = True
lowerCamelCase : Dict = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase : int = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
lowerCamelCase : Tuple = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _lowercase ( self ) -> List[str]:
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : str = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCamelCase : Any = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase : Tuple = outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Dict = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : int = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def prepare_video() -> list:
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
@slow
    def test_inference_for_video_classification(self) -> None:
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 48
|
def naive_pattern_search(s, pattern) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
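# The naive scan is O((n - m + 1) * m) for text length n and pattern length m;
# a quick sanity check: naive_pattern_search('AAAA', 'AA') == [0, 1, 2].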
| 48
| 1
|
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r'\b(a|an|the)\b', re.UNICODE)
OPTS = None
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh", "-t", type=float, default=1.0,
        help="Predict \"\" if no-answer probability exceeds this (default = 1.0).",
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset) -> dict:
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s) -> str:
    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s) -> list:
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred) -> int:
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred) -> float:
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
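# Worked example: gold "a b c" vs. prediction "b c d" share two tokens,
# so precision = recall = 2/3 and F1 = (2 * 2/3 * 2/3) / (2/3 + 2/3) = 2/3.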
def get_raw_scores(dataset, preds) -> tuple:
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f'''Missing prediction for {qid}''')
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh) -> dict:
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None) -> collections.OrderedDict:
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(fa_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix) -> None:
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title) -> None:
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None) -> dict:
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir) -> None:
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_fa, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name) -> None:
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f'''Histogram of no-answer probability: {name}''')
    plt.savefig(os.path.join(image_dir, f'''na_prob_hist_{name}.png'''))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans) -> tuple:
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans) -> None:
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main() -> None:
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    OPTS = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
| 48
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
| 1
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'python',
'tqdm',
'regex',
'requests',
'packaging',
'filelock',
'numpy',
'tokenizers',
'huggingface-hub',
'safetensors',
'accelerate',
'pyyaml',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None) -> None:
    require_version(deps[pkg], hint)
| 48
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data) -> tuple:
    return (data["data"], data["target"])
def xgboost(features, target, test_features) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test, predictions)}''')
    print(f'''Mean Square Error : {mean_squared_error(y_test, predictions)}''')
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 48
| 1
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
) -> None:
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 48
|
from math import sqrt
def solution(limit=100_0000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
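# For a cuboid with sides a <= b <= M, the shortest surface path between opposite corners
# has length sqrt((a + b)**2 + M**2); the min/max expression above counts the pairs (a, b)
# with a + b == sum_shortest_sides and b <= max_cuboid_size (Project Euler problem 86).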
if __name__ == "__main__":
print(f'''{solution() = }''')
| 48
| 1
|
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
    env_name = 'hopper-medium-v2'
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
# execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
f'''Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'''
f''' {total_score}'''
)
# save observations for rendering
rollout.append(next_observation.copy())
            obs = next_observation
except KeyboardInterrupt:
pass
print(f'''Total reward: {total_reward}''')
| 48
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    '''simple docstring'''
    mode = "sequence-classification"
    def __init__(self, hparams) -> None:
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
return self.model(**UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Optional[int] = self(**UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = outputs[0]
lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"]
lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowercase ( self ) -> str:
lowerCamelCase : Any = self.hparams
lowerCamelCase : Union[str, Any] = processors[args.task]()
lowerCamelCase : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowerCamelCase : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
lowerCamelCase : Dict = convert_examples_to_features(
UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , UpperCamelCase__ )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
lowerCamelCase : str = "dev" if mode == "test" else mode
lowerCamelCase : int = self._feature_file(UpperCamelCase__ )
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
lowerCamelCase : str = torch.load(UpperCamelCase__ )
lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Dict = self(**UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Any = outputs[:2]
lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy()
lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self , UpperCamelCase__ ) -> tuple:
lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : str = np.squeeze(UpperCamelCase__ )
lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )}
lowerCamelCase : List[str] = dict(results.items() )
lowerCamelCase : Optional[int] = results
return ret, preds_list, out_label_list
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( UpperCamelCase__ , UpperCamelCase__ ) -> int:
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def main() -> int:
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S")}''',
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
| 48
| 1
|
import requests
APPID = ''  # <-- Put your OpenWeatherMap appid here!
URL_BASE = 'https://api.openweathermap.org/data/2.5/'
def A ( _SCREAMING_SNAKE_CASE = "Chicago" ,_SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "weather" ,params=locals() ).json()
def A ( _SCREAMING_SNAKE_CASE = "Kolkata, India" ,_SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "forecast" ,params=locals() ).json()
def A ( _SCREAMING_SNAKE_CASE = 55.68 ,_SCREAMING_SNAKE_CASE = 12.57 ,_SCREAMING_SNAKE_CASE = APPID ) -> dict:
return requests.get(URL_BASE + "onecall" ,params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
SCREAMING_SNAKE_CASE__ : Dict = input('Enter a location:').strip()
if location:
pprint(current_weather(location))
else:
break
| 48
|
def method_a(boundary, steps) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
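# For f(x) = x**2 on [0, 1] with 10 steps this returns roughly 0.335; the exact
# integral is 1/3, and the trapezoidal rule slightly overestimates convex functions.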
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f'''y = {y}''')
if __name__ == "__main__":
main()
| 48
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class MyQueue:
    '''simple docstring'''
    def __init__(self) -> None:
        self.data: list[Any] = []
        self.head: int = 0
        self.tail: int = 0
    def is_empty(self) -> bool:
        return self.head == self.tail
    def push(self, data) -> None:
        self.data.append(data)
        self.tail = self.tail + 1
    def pop(self) -> Any:
        ret = self.data[self.head]
        self.head = self.head + 1
        return ret
    def count(self) -> int:
        return self.tail - self.head
    def print_queue(self) -> None:
        print(self.data)
        print("**************")
        print(self.data[self.head : self.tail])
class MyNode:
    '''simple docstring'''
    def __init__(self, data) -> None:
        self.data = data
        self.left: MyNode | None = None
        self.right: MyNode | None = None
        self.height: int = 1
    def get_data(self) -> Any:
        return self.data
    def get_left(self) -> MyNode | None:
        return self.left
    def get_right(self) -> MyNode | None:
        return self.right
    def get_height(self) -> int:
        return self.height
    def set_data(self, data) -> None:
        self.data = data
    def set_left(self, node) -> None:
        self.left = node
    def set_right(self, node) -> None:
        self.right = node
    def set_height(self, height) -> None:
        self.height = height
def get_height(node) -> int:
    if node is None:
        return 0
    return node.get_height()
def my_max(a, b) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def left_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret
def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)
def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
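# lr_rotation handles the left-right imbalance (the new node hangs under the left
# child's right subtree): it left-rotates the left child first, then right-rotates
# the node itself; rl_rotation is the mirror case for right-left imbalances.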
def insert_node(node, data) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()
def get_left_most(root) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root, data) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    h = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(h)
    return root
class AVLtree:
    '''simple docstring'''
    def __init__(self) -> None:
        self.root: MyNode | None = None
    def get_height(self) -> int:
        return get_height(self.root)
    def insert(self, data) -> None:
        print("insert:" + str(data))
        self.root = insert_node(self.root, data)
    def del_node(self, data) -> None:
        print("delete:" + str(data))
        if self.root is None:
            print("Tree is empty!")
            return
        self.root = del_node(self.root, data)
    def __str__(self) -> str:  # a level traversal, gives a more intuitive look on the tree
        output = ""
        q = MyQueue()
        q.push(self.root)
        layer = self.get_height()
        if layer == 0:
            return output
        cnt = 0
        while not q.is_empty():
            node = q.pop()
            space = " " * int(math.pow(2, layer - 1))
            output += space
            if node is None:
                output += "*"
                q.push(None)
                q.push(None)
            else:
                output += str(node.get_data())
                q.push(node.get_left())
                q.push(node.get_right())
            output += space
            cnt = cnt + 1
            for i in range(100):
                if cnt == math.pow(2, layer) - 1:
                    layer = layer - 1
                    if layer == 0:
                        output += "\n*************************************"
                        return output
                    output += "\n"
                    break
        output += "\n*************************************"
        return output
def _test() -> None:
    import doctest
    doctest.testmod()
if __name__ == "__main__":
_test()
    t = AVLtree()
    lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 48
|
def solution(n=100_0000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for inputa in range(2, n):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
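# e.g. solution(10) == 9: among starting values below 10, the Collatz chain from 9
# (9 -> 28 -> 14 -> 7 -> ... -> 1) is the longest, with 20 terms.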
if __name__ == "__main__":
print(solution(int(input().strip())))
| 48
| 1
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    '''simple docstring'''
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.", FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 48
|
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f'''The following files have auto mappings that need sorting: {", ".join(failures)}. Run `make style` to fix'''
            " this.")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
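# The sort key above pulls the first quoted identifier out of each mapping
# block with `_re_identifier`. A self-contained demonstration on toy entries:
import re

_re_ident_demo = re.compile(r'\s*\(\s*"(\S[^"]+)"')
entries = [
    '        ("roberta", "RobertaModel"),',
    '        ("albert", "AlbertModel"),',
    '        ("bert", "BertModel"),',
]
entries = sorted(entries, key=lambda block: _re_ident_demo.search(block).groups()[0])
print("\n".join(entries))  # albert, bert, roberta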
| 48
| 1
|
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ (metaclass=lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Any = ["""torch""", """scipy"""]
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
requires_backends(self , ["torch", "scipy"] )
@classmethod
def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def _lowercase ( cls , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
requires_backends(cls , ["torch", "scipy"] )
| 48
|
def A ( _SCREAMING_SNAKE_CASE ) -> list:
if n_term == "":
return []
lowerCamelCase : list = []
for temp in range(int(_SCREAMING_SNAKE_CASE ) ):
series.append(f'''1/{temp + 1}''' if series else "1" )
return series
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1 + 1/2 + 1/3 + ... + 1/n')
print(harmonic_series(nth_term))
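# A runnable restatement of the series builder above with a quick check: every
# term after the first is rendered as the string "1/k".
def harmonic_series_sketch(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for k in range(int(n_term)):
        series.append(f"1/{k + 1}" if series else "1")
    return series

assert harmonic_series_sketch("4") == ["1", "1/2", "1/3", "1/4"]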
| 48
| 1
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[Any] = '▁'
SCREAMING_SNAKE_CASE__ : int = {'vocab_file': 'vocab.txt', 'sentencepiece_model_ckpt': 'sentencepiece.bpe.model'}
SCREAMING_SNAKE_CASE__ : List[str] = {
'sentencepiece_model_file': 'sentencepiece.bpe.model',
'vocab_file': 'vocab.txt',
}
SCREAMING_SNAKE_CASE__ : List[str] = {
'vocab_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt',
},
'sentencepiece_model_file': {
'ernie-m-base': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
'ernie-m-large': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model',
},
}
SCREAMING_SNAKE_CASE__ : Any = {
'ernie-m-base': 514,
'ernie-m-large': 514,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'ernie-m-base': {'do_lower_case': False},
'ernie-m-large': {'do_lower_case': False},
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = ["input_ids"]
lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[Any] = RESOURCE_FILES_NAMES
def __init__( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__="utf8" , UpperCamelCase__="[UNK]" , UpperCamelCase__="[SEP]" , UpperCamelCase__="[PAD]" , UpperCamelCase__="[CLS]" , UpperCamelCase__="[MASK]" , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is included in the raw text, so there should be a match in a non-normalized sentence.
lowerCamelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , vocab_file=UpperCamelCase__ , encoding=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : str = do_lower_case
lowerCamelCase : Any = sentencepiece_model_ckpt
lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
        # mimic the behavior of paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
if vocab_file is not None:
lowerCamelCase : str = self.load_vocab(filepath=UpperCamelCase__ )
else:
lowerCamelCase : Optional[Any] = {self.sp_model.id_to_piece(UpperCamelCase__ ): id for id in range(self.sp_model.get_piece_size() )}
lowerCamelCase : str = {v: k for k, v in self.vocab.items()}
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
if text is None:
return None
lowerCamelCase : Optional[Any] = self.tokenize(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[str] = "", []
for i, ch in enumerate(UpperCamelCase__ ):
if ch in self.SP_CHAR_MAPPING:
lowerCamelCase : List[Any] = self.SP_CHAR_MAPPING.get(UpperCamelCase__ )
else:
lowerCamelCase : Any = unicodedata.normalize("NFKC" , UpperCamelCase__ )
if self.is_whitespace(UpperCamelCase__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(UpperCamelCase__ ) )
lowerCamelCase , lowerCamelCase , lowerCamelCase : Dict = normalized_text, [], 0
if self.do_lower_case:
lowerCamelCase : Optional[int] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowerCamelCase : Optional[Any] = token[1:]
lowerCamelCase : Dict = text[offset:].index(UpperCamelCase__ ) + offset
lowerCamelCase : Union[str, Any] = start + len(UpperCamelCase__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowerCamelCase : str = end
return token_mapping
@property
def _lowercase ( self ) -> Any:
return len(self.vocab )
def _lowercase ( self ) -> List[str]:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.__dict__.copy()
lowerCamelCase : Dict = None
return state
def __setstate__( self , UpperCamelCase__ ) -> Optional[Any]:
lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : List[Any] = {}
lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def _lowercase ( self , UpperCamelCase__ ) -> Dict:
return "".join((self.SP_CHAR_MAPPING.get(UpperCamelCase__ , UpperCamelCase__ ) for c in text) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=64 , UpperCamelCase__=0.1 ) -> int:
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowerCamelCase : Tuple = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowerCamelCase : int = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowerCamelCase : str = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowerCamelCase : Dict = self.sp_model.EncodeAsPieces(UpperCamelCase__ )
else:
lowerCamelCase : Optional[int] = self.sp_model.SampleEncodeAsPieces(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Tuple = []
for pi, piece in enumerate(UpperCamelCase__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(UpperCamelCase__ ) and pi != 0:
new_pieces.append(UpperCamelCase__ )
continue
else:
continue
lowerCamelCase : Dict = 0
for i, chunk in enumerate(UpperCamelCase__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(UpperCamelCase__ ) or self.is_punct(UpperCamelCase__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(UpperCamelCase__ )
lowerCamelCase : List[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase : Any = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowerCamelCase : Optional[Any] = i
if len(UpperCamelCase__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> Dict:
lowerCamelCase : Optional[int] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ ) -> Any:
lowerCamelCase : Union[str, Any] = self.convert_ids_to_tokens(UpperCamelCase__ )
lowerCamelCase : Tuple = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.vocab.get(UpperCamelCase__ , self.vocab.get(self.unk_token ) )
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.reverse_vocab.get(UpperCamelCase__ , self.unk_token )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Tuple:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase : List[str] = [self.cls_token_id]
lowerCamelCase : Optional[Any] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None ) -> Union[str, Any]:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=False ) -> str:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(UpperCamelCase__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(UpperCamelCase__ ) + 1) + [1] * (len(UpperCamelCase__ ) + 3)
def _lowercase ( self , UpperCamelCase__ ) -> Union[str, Any]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def _lowercase ( self , UpperCamelCase__ ) -> int:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(UpperCamelCase__ ) == 1:
lowerCamelCase : str = unicodedata.category(UpperCamelCase__ )
if cat == "Zs":
return True
return False
def _lowercase ( self , UpperCamelCase__ ) -> Dict:
lowerCamelCase : Any = {}
with io.open(UpperCamelCase__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = line.rstrip("\n" )
lowerCamelCase : Union[str, Any] = int(UpperCamelCase__ )
return token_to_idx
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
lowerCamelCase : Union[str, Any] = 0
if os.path.isdir(UpperCamelCase__ ):
lowerCamelCase : Optional[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowerCamelCase : str = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowerCamelCase : List[Any] = token_index
writer.write(token + "\n" )
index += 1
lowerCamelCase : Union[str, Any] = os.path.join(UpperCamelCase__ , "sentencepiece.bpe.model" )
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (vocab_file,)
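# The sequence-pair helpers above encode ERNIE-M's layout: [CLS] A [SEP] for a
# single sequence and [CLS] A [SEP] [SEP] B [SEP] for a pair. A standalone
# sketch of the resulting token-type ids:
def erniem_token_type_ids(len_a: int, len_b=None) -> list:
    if len_b is None:
        return [0] * (len_a + 2)
    return [0] * (len_a + 1) + [1] * (len_b + 3)

assert erniem_token_type_ids(3) == [0, 0, 0, 0, 0]
assert erniem_token_type_ids(2, 2) == [0, 0, 0, 1, 1, 1, 1, 1]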
| 48
|
from __future__ import annotations
import requests
def A ( _SCREAMING_SNAKE_CASE ) -> dict:
lowerCamelCase : Tuple = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'''
return requests.get(_SCREAMING_SNAKE_CASE ).json()
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> list[dict]:
lowerCamelCase : str = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
lowerCamelCase : Any = requests.get(_SCREAMING_SNAKE_CASE ).json()[:max_stories]
return [get_hackernews_story(_SCREAMING_SNAKE_CASE ) for story_id in story_ids]
def A ( _SCREAMING_SNAKE_CASE = 10 ) -> str:
lowerCamelCase : str = hackernews_top_stories(_SCREAMING_SNAKE_CASE )
return "\n".join("* [{title}]({url})".format(**_SCREAMING_SNAKE_CASE ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
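# A de-obfuscated, runnable version of the Hacker News script above (network
# access required; the API endpoints are the ones in the original).
import requests

def get_story(story_id: int) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url, timeout=10).json()

def top_stories(max_stories: int = 10) -> list:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    return [get_story(sid) for sid in requests.get(url, timeout=10).json()[:max_stories]]

def top_stories_as_markdown(max_stories: int = 10) -> str:
    return "\n".join("* [{title}]({url})".format(**s) for s in top_stories(max_stories))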
| 48
| 1
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE__ : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """resnet"""
lowerCamelCase_ : List[Any] = ["""basic""", """bottleneck"""]
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=64 , UpperCamelCase__=[256, 512, 1024, 2048] , UpperCamelCase__=[3, 4, 6, 3] , UpperCamelCase__="bottleneck" , UpperCamelCase__="relu" , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> str:
super().__init__(**UpperCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
lowerCamelCase : Optional[Any] = num_channels
lowerCamelCase : List[str] = embedding_size
lowerCamelCase : Any = hidden_sizes
lowerCamelCase : List[str] = depths
lowerCamelCase : Optional[int] = layer_type
lowerCamelCase : Any = hidden_act
lowerCamelCase : Optional[Any] = downsample_in_first_stage
lowerCamelCase : List[str] = ["stem"] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
lowerCamelCase , lowerCamelCase : Tuple = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = version.parse("""1.11""" )
@property
def _lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase ( self ) -> float:
return 1e-3
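# The stage names built in the config above follow a fixed pattern: a "stem"
# entry plus one entry per depth. With the default depths:
depths = [3, 4, 6, 3]
stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
print(stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']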
| 48
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """blip_2_vision_model"""
def __init__( self , UpperCamelCase__=1408 , UpperCamelCase__=6144 , UpperCamelCase__=39 , UpperCamelCase__=16 , UpperCamelCase__=224 , UpperCamelCase__=14 , UpperCamelCase__="gelu" , UpperCamelCase__=0.00001 , UpperCamelCase__=0.0 , UpperCamelCase__=1e-10 , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Optional[Any]:
super().__init__(**UpperCamelCase__ )
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Union[str, Any] = intermediate_size
lowerCamelCase : List[str] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Dict = patch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Union[str, Any] = attention_dropout
lowerCamelCase : Dict = layer_norm_eps
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : str = qkv_bias
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : List[str] = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : Optional[int] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = """blip_2_qformer"""
def __init__( self , UpperCamelCase__=3_0522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=2 , UpperCamelCase__=1408 , **UpperCamelCase__ , ) -> int:
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : int = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : int = hidden_act
lowerCamelCase : Optional[Any] = intermediate_size
lowerCamelCase : Dict = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Dict = max_position_embeddings
lowerCamelCase : List[str] = initializer_range
lowerCamelCase : List[str] = layer_norm_eps
lowerCamelCase : int = position_embedding_type
lowerCamelCase : Tuple = cross_attention_frequency
lowerCamelCase : Optional[int] = encoder_hidden_size
@classmethod
def _lowercase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : str = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
lowerCamelCase : int = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : List[str] = """blip-2"""
lowerCamelCase_ : int = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=32 , **UpperCamelCase__ ) -> str:
super().__init__(**UpperCamelCase__ )
if vision_config is None:
lowerCamelCase : List[Any] = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
lowerCamelCase : List[Any] = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
lowerCamelCase : Any = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
lowerCamelCase : Optional[int] = BlipaVisionConfig(**UpperCamelCase__ )
lowerCamelCase : str = BlipaQFormerConfig(**UpperCamelCase__ )
lowerCamelCase : List[str] = text_config["model_type"] if "model_type" in text_config else "opt"
lowerCamelCase : str = CONFIG_MAPPING[text_model_type](**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self.text_config.tie_word_embeddings
lowerCamelCase : int = self.text_config.is_encoder_decoder
lowerCamelCase : Optional[Any] = num_query_tokens
lowerCamelCase : int = self.vision_config.hidden_size
lowerCamelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase : Dict = 1.0
lowerCamelCase : List[Any] = 0.02
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ , ) -> str:
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **UpperCamelCase__ , )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Tuple = copy.deepcopy(self.__dict__ )
lowerCamelCase : Tuple = self.vision_config.to_dict()
lowerCamelCase : int = self.qformer_config.to_dict()
lowerCamelCase : Optional[Any] = self.text_config.to_dict()
lowerCamelCase : int = self.__class__.model_type
return output
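# The to_dict override above serializes nested sub-configs by replacing each
# attribute with that sub-config's own to_dict output. A minimal sketch of the
# pattern with hypothetical config classes:
import copy

class VisionCfgSketch:
    def __init__(self, hidden_size: int = 1408) -> None:
        self.hidden_size = hidden_size

    def to_dict(self) -> dict:
        return copy.deepcopy(self.__dict__)

class ComposedCfgSketch:
    model_type = "composed"

    def __init__(self) -> None:
        self.vision_config = VisionCfgSketch()
        self.num_query_tokens = 32

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output

print(ComposedCfgSketch().to_dict())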
| 48
| 1
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCamelCase__ :
'''simple docstring'''
@staticmethod
def _lowercase ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
pass
def A ( _SCREAMING_SNAKE_CASE ) -> Dict:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE__ : Dict = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
lowerCamelCase : str = pipeline(
"document-question-answering" , model=UpperCamelCase__ , tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = INVOICE_URL
lowerCamelCase : Optional[int] = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
lowerCamelCase : Optional[Any] = "What is the placebo?"
lowerCamelCase : int = [
{
"image": load_image(UpperCamelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
lowerCamelCase : Dict = dqa_pipeline(UpperCamelCase__ , top_k=2 )
self.assertEqual(
UpperCamelCase__ , [
[
{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ ), "start": ANY(UpperCamelCase__ ), "end": ANY(UpperCamelCase__ )},
{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ ), "start": ANY(UpperCamelCase__ ), "end": ANY(UpperCamelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Any = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
lowerCamelCase : Union[str, Any] = INVOICE_URL
lowerCamelCase : Tuple = "How many cats are there?"
lowerCamelCase : Dict = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
lowerCamelCase : List[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
lowerCamelCase : int = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , UpperCamelCase__ )
        # No text is detected in this image, so layoutlmv2 should fail
        # and probably return an empty answer.
lowerCamelCase : str = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase : List[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCamelCase : Dict = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCamelCase : Optional[int] = []
lowerCamelCase : List[Any] = []
lowerCamelCase : Optional[Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , words=UpperCamelCase__ , boxes=UpperCamelCase__ , top_k=2 )
self.assertEqual(UpperCamelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Any:
lowerCamelCase : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCamelCase : List[str] = INVOICE_URL
lowerCamelCase : str = "What is the invoice number?"
lowerCamelCase : int = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Tuple = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Tuple = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _lowercase ( self ) -> Any:
lowerCamelCase : Optional[int] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCamelCase : Dict = INVOICE_URL
lowerCamelCase : Optional[Any] = "What is the invoice number?"
lowerCamelCase : Tuple = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Optional[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : int = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : str = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase__ )
lowerCamelCase : Dict = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase__ , revision="3dc6de3" , )
lowerCamelCase : Dict = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : Union[str, Any] = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase : Dict = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCamelCase : Optional[int] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCamelCase : int = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : List[str] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=UpperCamelCase__ )
lowerCamelCase : Dict = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=UpperCamelCase__ , revision="3dc6de3" , max_seq_len=50 , )
lowerCamelCase : List[str] = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : str = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCamelCase : Dict = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCamelCase : Tuple = list(zip(*apply_tesseract(load_image(UpperCamelCase__ ) , UpperCamelCase__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCamelCase : Optional[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _lowercase ( self ) -> str:
lowerCamelCase : Optional[int] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCamelCase : str = INVOICE_URL
lowerCamelCase : Any = "What is the invoice number?"
lowerCamelCase : Tuple = dqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 )
self.assertEqual(nested_simplify(UpperCamelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _lowercase ( self ) -> Tuple:
pass
| 48
|
import random
from .binary_exp_mod import bin_exp_mod
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=1000 ) -> List[str]:
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowerCamelCase : List[Any] = n - 1
lowerCamelCase : Dict = 0
    while d % 2 == 0:
        d //= 2
        exp += 1
    # n - 1 = d * (2 ** exp); integer division keeps d an int for bin_exp_mod
lowerCamelCase : Optional[Any] = 0
while count < prec:
lowerCamelCase : str = random.randint(2 ,n - 1 )
lowerCamelCase : Dict = bin_exp_mod(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if b != 1:
lowerCamelCase : str = True
for _ in range(_SCREAMING_SNAKE_CASE ):
if b == n - 1:
lowerCamelCase : Tuple = False
break
lowerCamelCase : int = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
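# A runnable restatement of the Miller-Rabin test above with descriptive names.
# Integer division keeps d an int when factoring n - 1, which the modular
# exponentiation requires; built-in pow stands in for the bin_exp_mod helper.
import random

def is_prime_big_sketch(n: int, prec: int = 100) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    d, exp = n - 1, 0
    while d % 2 == 0:  # factor n - 1 as d * 2**exp with d odd
        d //= 2
        exp += 1
    for _ in range(prec):
        b = pow(random.randint(2, n - 1), d, n)
        if b == 1:
            continue
        for _ in range(exp):
            if b == n - 1:
                break
            b = b * b % n
        else:
            return False  # a witness proves n composite
    return True

assert is_prime_big_sketch(97) and not is_prime_big_sketch(91)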
| 48
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE__ : Tuple = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = """facebook/nllb-200-distilled-600M"""
lowerCamelCase_ : List[Any] = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase_ : Any = """translator"""
lowerCamelCase_ : Any = AutoTokenizer
lowerCamelCase_ : List[Any] = AutoModelForSeqaSeqLM
lowerCamelCase_ : Optional[int] = LANGUAGE_CODES
lowerCamelCase_ : Union[str, Any] = ["""text""", """text""", """text"""]
lowerCamelCase_ : Dict = ["""text"""]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
lowerCamelCase : Union[str, Any] = self.lang_to_code[src_lang]
lowerCamelCase : List[Any] = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCamelCase__ , return_tensors="pt" , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
return self.model.generate(**UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__ )
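# Before translating, the tool above only has to map plain-English language
# names to NLLB-200 codes and reject unknown ones. A standalone sketch of that
# lookup using a trimmed-down copy of the mapping:
_LANG_DEMO = {"English": "eng_Latn", "Romanian": "ron_Latn", "French": "fra_Latn"}

def resolve_lang_code(name: str) -> str:
    if name not in _LANG_DEMO:
        raise ValueError(f"{name} is not a supported language.")
    return _LANG_DEMO[name]

assert resolve_lang_code("Romanian") == "ron_Latn"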
| 48
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Optional[Any]:
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self , UpperCamelCase__ ) -> Any:
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
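# Unlike BERT, XLNet appends its special tokens: A + [SEP] + [CLS] for one
# sequence and A + [SEP] + B + [SEP] + [CLS] for a pair, as the helpers above
# show. A standalone sketch with illustrative ids for [SEP] and [CLS]:
def xlnet_inputs_sketch(ids_a: list, ids_b=None, sep: int = 4, cls: int = 3) -> list:
    if ids_b is None:
        return ids_a + [sep] + [cls]
    return ids_a + [sep] + ids_b + [sep] + [cls]

assert xlnet_inputs_sketch([10, 11]) == [10, 11, 4, 3]
assert xlnet_inputs_sketch([10], [20]) == [10, 4, 20, 4, 3]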
| 48
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=2 , UpperCamelCase__=24 , UpperCamelCase__=16 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=None , UpperCamelCase__=2 , UpperCamelCase__=2 , ) -> List[Any]:
lowerCamelCase : Union[str, Any] = parent
lowerCamelCase : Any = batch_size
lowerCamelCase : List[Any] = patch_size
lowerCamelCase : List[str] = max_length
lowerCamelCase : Optional[Any] = num_mel_bins
lowerCamelCase : Optional[int] = is_training
lowerCamelCase : Optional[int] = use_labels
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Union[str, Any] = num_hidden_layers
lowerCamelCase : str = num_attention_heads
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : int = type_sequence_label_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Union[str, Any] = scope
lowerCamelCase : str = frequency_stride
lowerCamelCase : Union[str, Any] = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowerCamelCase : Optional[Any] = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
lowerCamelCase : Union[str, Any] = (self.max_length - self.patch_size) // self.time_stride + 1
lowerCamelCase : str = frequency_out_dimension * time_out_dimension
lowerCamelCase : str = num_patches + 2
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
lowerCamelCase : List[Any] = None
if self.use_labels:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : int = self.get_config()
return config, input_values, labels
def _lowercase ( self ) -> Optional[Any]:
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
lowerCamelCase : Union[str, Any] = ASTModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : int = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : int = config_and_inputs
lowerCamelCase : int = {"input_values": input_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
lowerCamelCase_ : Optional[int] = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = False
lowerCamelCase_ : Any = False
lowerCamelCase_ : List[Any] = False
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def _lowercase ( self ) -> int:
lowerCamelCase : List[Any] = ASTModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def _lowercase ( self ) -> List[Any]:
pass
def _lowercase ( self ) -> str:
lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self ) -> Dict:
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Dict = model_class(UpperCamelCase__ )
lowerCamelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : int = [*signature.parameters.keys()]
lowerCamelCase : Union[str, Any] = ["input_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@slow
def _lowercase ( self ) -> str:
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ASTModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" ,filename="sample_audio.flac" ,repo_type="dataset" )
lowerCamelCase , lowerCamelCase : List[str] = torchaudio.load(_SCREAMING_SNAKE_CASE )
return audio, sampling_rate
@require_torch
@require_torchaudio
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> List[Any]:
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def _lowercase ( self ) -> Optional[int]:
        # torch_device is assumed to come from transformers.testing_utils
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(torch_device )
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio , sampling_rate=sampling_rate , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 527) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
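        # A minimal follow-up sketch (illustrative, not part of the checkpoint test):
        # map the verified logits to a class name via the model's id2label table.
        #   predicted_id = outputs.logits.argmax(-1).item()
        #   print(model.config.id2label[predicted_id])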
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
SCREAMING_SNAKE_CASE__ : Any = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : int = EfficientNetConfig()
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"]
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"]
lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"]
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"]
lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"]
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : Any = 1000
lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
    lowerCamelCase : List[str] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
return config
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : str = EfficientNetImageProcessor(
size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,)
return preprocessor
def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )}
lowerCamelCase : List[Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params ,tf_params ,key_mapping ) -> None:
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 ,2 ,0 ,1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 ,3 ,0 ,1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowerCamelCase : Optional[int] = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,)
lowerCamelCase : List[Any] = original_model.trainable_variables
lowerCamelCase : Tuple = original_model.non_trainable_variables
lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowerCamelCase : List[str] = param.numpy()
lowerCamelCase : int = list(tf_params.keys() )
# Load HuggingFace model
lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowerCamelCase : Tuple = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = outputs.logits.detach().numpy()
# Original model inference
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 )
lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
lowerCamelCase : int = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
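# Example invocation (script name and output path are hypothetical):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path ./efficientnet-b0 --save_model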
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset ):
'''simple docstring'''
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ) -> None:
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__( self ):
        prompts = []
        for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
        outputs = self.tokenizer(prompts , padding=True , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria(StoppingCriteria ):
'''simple docstring'''
    def __init__( self , start_length , eof_strings , tokenizer ) -> None:
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
        return all(done )
def remove_last_block(string ) -> str:
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS ) ,string )
    # last string should be ""
    return "".join(string_list[:-2] )
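# Worked example of the split-and-drop trick above (illustrative input):
#   re.split("(\ndef|\nclass|...)", "    return a + b\ndef g():")
#     -> ["    return a + b", "\ndef", " g():"]
# The capturing group keeps each stop string as its own list element, so dropping
# the last two elements removes the final stop string and everything after it.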
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=20 ,**_SCREAMING_SNAKE_CASE ) -> List[str]:
lowerCamelCase : Optional[Any] = defaultdict(_SCREAMING_SNAKE_CASE ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_SCREAMING_SNAKE_CASE ) ):
with torch.no_grad():
lowerCamelCase : List[str] = batch["ids"].shape[-1]
lowerCamelCase : Optional[int] = accelerator.unwrap_model(_SCREAMING_SNAKE_CASE ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] ,num_return_sequences=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
# each task is generated batch_size times
lowerCamelCase : int = batch["task_id"].repeat(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = accelerator.pad_across_processes(
_SCREAMING_SNAKE_CASE ,dim=1 ,pad_index=tokenizer.pad_token_id )
lowerCamelCase , lowerCamelCase : Any = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase : List[Any] = generated_tokens.cpu().numpy()
lowerCamelCase : Tuple = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
gen_token_dict[task].append(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = [[] for _ in range(_SCREAMING_SNAKE_CASE )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase : List[Any] = tokenizer.decode(_SCREAMING_SNAKE_CASE ,skip_special_tokens=_SCREAMING_SNAKE_CASE ,clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
code_gens[task].append(remove_last_block(_SCREAMING_SNAKE_CASE ) )
return code_gens
def A ( ) -> Tuple:
# Setup configuration
lowerCamelCase : str = HfArgumentParser(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase : int = "false"
if args.num_workers is None:
lowerCamelCase : Optional[int] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase : Dict = Accelerator()
set_seed(args.seed ,device_specific=_SCREAMING_SNAKE_CASE )
# Load model and tokenizer
lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase : str = tokenizer.eos_token
lowerCamelCase : Dict = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase : List[Any] = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )] ),
}
# Load evaluation dataset and metric
lowerCamelCase : Optional[Any] = load_dataset("openai_humaneval" )
lowerCamelCase : str = load_metric("code_eval" )
lowerCamelCase : str = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
lowerCamelCase : List[Any] = args.n_samples // args.batch_size
lowerCamelCase : Union[str, Any] = TokenizedDataset(_SCREAMING_SNAKE_CASE ,human_eval["test"] ,n_copies=_SCREAMING_SNAKE_CASE ,n_tasks=_SCREAMING_SNAKE_CASE )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase : str = DataLoader(_SCREAMING_SNAKE_CASE ,batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase : Dict = code_eval_metric.compute(references=[""] ,predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
lowerCamelCase , lowerCamelCase : int = accelerator.prepare(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = complete_code(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,n_tasks=_SCREAMING_SNAKE_CASE ,batch_size=args.batch_size ,**_SCREAMING_SNAKE_CASE ,)
if accelerator.is_main_process:
lowerCamelCase : List[str] = []
for task in tqdm(range(_SCREAMING_SNAKE_CASE ) ):
lowerCamelCase : int = human_eval["test"][task]["test"]
lowerCamelCase : Any = f'''check({human_eval["test"][task]["entry_point"]})'''
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase , lowerCamelCase : Dict = code_eval_metric.compute(
references=_SCREAMING_SNAKE_CASE ,predictions=_SCREAMING_SNAKE_CASE ,num_workers=args.num_workers )
print(f'''Results: {pass_at_k}''' )
# Save results to json file
with open(args.output_file ,"w" ) as fp:
json.dump(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> List[str]:
if config_name_or_path is None:
lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
lowerCamelCase : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase : Any = question_encoder_name_or_path
lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = gen_config
lowerCamelCase : Optional[Any] = question_encoder_config
lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizers.
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
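# Example invocation (script name and model identifiers are hypothetical):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated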
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Any = ["""image_processor""", """tokenizer"""]
lowerCamelCase_ : int = """CLIPImageProcessor"""
lowerCamelCase_ : Any = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> Any:
lowerCamelCase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , UpperCamelCase__ , )
lowerCamelCase : Optional[Any] = kwargs.pop("feature_extractor" )
lowerCamelCase : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> int:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
lowerCamelCase : Tuple = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
lowerCamelCase : Any = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
lowerCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def _lowercase ( self ) -> Dict:
lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names
lowerCamelCase : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowercase ( self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , UpperCamelCase__ , )
return self.image_processor_class
@property
def _lowercase ( self ) -> Dict:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , UpperCamelCase__ , )
return self.image_processor
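# Minimal usage sketch (assumes a standard public CLIP checkpoint; illustrative only):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # inputs then carries input_ids, attention_mask and pixel_values together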
import math
def real_power(apparent_power: float ,power_factor: float ) -> float:
    # real (active) power: P = S * pf
    if (
        not isinstance(power_factor ,(int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * power_factor
def reactive_power(apparent_power: float ,power_factor: float ) -> float:
    # reactive power: Q = S * sqrt(1 - pf**2)
    if (
        not isinstance(power_factor ,(int, float) )
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1." )
    return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
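# Quick numeric check (illustrative): for a 100 VA load at power factor 0.8,
# real_power(100, 0.8) gives P = S * pf = 80 W, and reactive_power(100, 0.8)
# gives Q = S * sqrt(1 - pf**2) = 60 var.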
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args ) -> dict:
    return {key.lstrip("-" ): value for key, value in zip(unknown_args[::2] ,unknown_args[1::2] )}
def main() -> None:
lowerCamelCase : Dict = ArgumentParser(
"HuggingFace Datasets CLI tool" ,usage="datasets-cli <command> [<args>]" ,allow_abbrev=_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = parser.add_subparsers(help="datasets-cli command helpers" )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
TestCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
RunBeamCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
DummyDataCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
# Parse args
lowerCamelCase , lowerCamelCase : Union[str, Any] = parser.parse_known_args()
if not hasattr(_SCREAMING_SNAKE_CASE ,"func" ):
parser.print_help()
exit(1 )
lowerCamelCase : Dict = parse_unknown_args(_SCREAMING_SNAKE_CASE )
# Run
lowerCamelCase : Dict = args.func(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
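# Usage note: commands follow `datasets-cli <command> [<args>]`; any extra
# `--key value` pairs the chosen subparser does not declare are collected by
# parse_known_args and forwarded to the command through parse_unknown_args above.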
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Any:
lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> str:
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def rename_key(dct ,old ,new ) -> None:
    val = dct.pop(old )
    dct[new] = val
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
    lowerCamelCase : Optional[int] = {int(k): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
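# Example invocation (script name and output path are hypothetical):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224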
import numpy
# List of input, output pairs
SCREAMING_SNAKE_CASE__ : List[str] = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
SCREAMING_SNAKE_CASE__ : List[Any] = (((515, 22, 13), 555), ((61, 35, 49), 150))
SCREAMING_SNAKE_CASE__ : Optional[int] = [2, 4, 1, 5]
SCREAMING_SNAKE_CASE__ : List[Any] = len(train_data)
SCREAMING_SNAKE_CASE__ : Any = 0.009
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="train" ) -> str:
return calculate_hypothesis_value(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) - output(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE ) -> List[str]:
lowerCamelCase : Optional[Any] = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=m ) -> str:
lowerCamelCase : List[Any] = 0
for i in range(_SCREAMING_SNAKE_CASE ):
if index == -1:
summation_value += _error(_SCREAMING_SNAKE_CASE )
else:
summation_value += _error(_SCREAMING_SNAKE_CASE ) * train_data[i][0][index]
return summation_value
def A ( _SCREAMING_SNAKE_CASE ) -> Tuple:
lowerCamelCase : str = summation_of_cost_derivative(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) / m
return cost_derivative_value
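# The loop in run_gradient_descent below applies the standard batch update rule:
#   theta_i = theta_i - LEARNING_RATE * (1/m) * sum_j error(j) * x_j[i]
# where index == -1 selects the bias term (whose input feature is implicitly 1).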
def A ( ) -> str:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase : str = 0.000002
lowerCamelCase : List[str] = 0
lowerCamelCase : List[Any] = 0
while True:
j += 1
lowerCamelCase : Dict = [0, 0, 0, 0]
for i in range(0 ,len(_SCREAMING_SNAKE_CASE ) ):
lowerCamelCase : List[Any] = get_cost_derivative(i - 1 )
lowerCamelCase : Dict = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=_SCREAMING_SNAKE_CASE ,rtol=_SCREAMING_SNAKE_CASE ,):
break
lowerCamelCase : Dict = temp_parameter_vector
print(("Number of iterations:", j) )
def A ( ) -> Dict:
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
print(("Actual output value:", output(_SCREAMING_SNAKE_CASE ,"test" )) )
print(("Hypothesis output:", calculate_hypothesis_value(_SCREAMING_SNAKE_CASE ,"test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
import random
def _partition(data: list ,pivot ) -> tuple:
    # split data into elements less than, equal to, and greater than the pivot
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select(items: list ,index: int ):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 ,len(items ) - 1 )]
    less, equal, greater = _partition(items ,pivot )
    count = len(equal )
    m = len(less )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(less ,index )
    # must be in larger
    else:
        return quick_select(greater ,index - (m + count) )
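# Minimal usage sketch: picking index len(items) // 2 yields the median of the
# sorted order, e.g. quick_select([2, 4, 5, 7, 899, 54, 32], 3) returns 7.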
MOD_ADLER = 65521
def adler32(plain_text: str ) -> int:
    # compute the Adler-32 checksum of a string
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr )) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
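# Sanity check against the well-known reference value for "Wikipedia":
#   adler32("Wikipedia") == 0x11E60398  # 300286872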
def greatest_common_divisor(x: int ,y: int ) -> int:
    return x if y == 0 else greatest_common_divisor(y ,x % y )
def lcm(x: int ,y: int ) -> int:
    return (x * y) // greatest_common_divisor(x ,y )
def solution(n: int = 20 ) -> int:
    g = 1
    for i in range(1 ,n + 1 ):
        g = lcm(g ,i )
    return g
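# Known check (Project Euler 5): solution(10) returns 2520, the smallest number
# evenly divisible by every integer from 1 to 10; the default n = 20 gives 232792560.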
if __name__ == "__main__":
print(f'''{solution() = }''')
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(tokenizer_name ,data_dir ,max_source_length=1024 ,max_target_length=1024 ,consider_target=False ,**kwargs ) -> Tuple:
lowerCamelCase : Dict = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Tuple = SeqaSeqDataset(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,type_path="train" ,**_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = tok.pad_token_id
def get_lens(_SCREAMING_SNAKE_CASE ):
lowerCamelCase : Any = tqdm(
DataLoader(_SCREAMING_SNAKE_CASE ,batch_size=512 ,num_workers=8 ,shuffle=_SCREAMING_SNAKE_CASE ,collate_fn=ds.collate_fn ) ,desc=str(ds.len_file ) ,)
lowerCamelCase : Optional[int] = []
for batch in dl:
lowerCamelCase : List[Any] = batch["input_ids"].ne(_SCREAMING_SNAKE_CASE ).sum(1 ).tolist()
lowerCamelCase : List[Any] = batch["labels"].ne(_SCREAMING_SNAKE_CASE ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
max_lens.append(max(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
else:
max_lens.extend(_SCREAMING_SNAKE_CASE )
return max_lens
lowerCamelCase : List[Any] = get_lens(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = SeqaSeqDataset(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,type_path="val" ,**_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = get_lens(_SCREAMING_SNAKE_CASE )
pickle_save(_SCREAMING_SNAKE_CASE ,train_ds.len_file )
pickle_save(_SCREAMING_SNAKE_CASE ,val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
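# Example invocation via fire (tokenizer and data dir values are illustrative):
#   python save_len_file.py facebook/bart-large ./cnn_dm --consider_target=True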
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" )
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> int:
lowerCamelCase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=UpperCamelCase__ )
@slow
def _lowercase ( self ) -> List[Any]:
self.resolver.convert_models(["heb-eng"] )
@slow
def _lowercase ( self ) -> Tuple:
lowerCamelCase , lowerCamelCase : Dict = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ )
assert mmeta["long_pair"] == "heb-eng"
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'RWKV/rwkv-4-169m-pile': 'https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json',
'RWKV/rwkv-4-430m-pile': 'https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json',
'RWKV/rwkv-4-1b5-pile': 'https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json',
'RWKV/rwkv-4-3b-pile': 'https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json',
'RWKV/rwkv-4-7b-pile': 'https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json',
'RWKV/rwkv-4-14b-pile': 'https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json',
'RWKV/rwkv-raven-1b5': 'https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json',
'RWKV/rwkv-raven-3b': 'https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json',
'RWKV/rwkv-raven-7b': 'https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json',
'RWKV/rwkv-raven-14b': 'https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json',
}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = """rwkv"""
lowerCamelCase_ : List[Any] = {"""max_position_embeddings""": """context_length"""}
def __init__( self , UpperCamelCase__=5_0277 , UpperCamelCase__=1024 , UpperCamelCase__=4096 , UpperCamelCase__=32 , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=1e-5 , UpperCamelCase__=0 , UpperCamelCase__=0 , UpperCamelCase__=6 , UpperCamelCase__=False , UpperCamelCase__=True , **UpperCamelCase__ , ) -> Union[str, Any]:
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Tuple = context_length
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : List[str] = attention_hidden_size if attention_hidden_size is not None else hidden_size
lowerCamelCase : Union[str, Any] = intermediate_size if intermediate_size is not None else 4 * hidden_size
lowerCamelCase : Tuple = layer_norm_epsilon
lowerCamelCase : int = rescale_every
lowerCamelCase : List[str] = use_cache
lowerCamelCase : Optional[int] = bos_token_id
lowerCamelCase : Optional[int] = eos_token_id
super().__init__(
tie_word_embeddings=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
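    # Sketch of the defaults above (illustrative): leaving attention_hidden_size and
    # intermediate_size as None makes them fall back to hidden_size (4096) and
    # 4 * hidden_size (16384) respectively.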
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
# Initialise PyTorch model
lowerCamelCase : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : str = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
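# Example invocation (paths are hypothetical):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./t5/model.ckpt --config_file ./t5/config.json \
#       --pytorch_dump_path ./t5-pytorch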
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int ) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number ) )
def solution() -> int:
    return sum(
        number
        for number in range(1000 ,1_000_000 )
        if number == digits_fifth_powers_sum(number ) )
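# Known result (Project Euler 30): only 4150, 4151, 54748, 92727, 93084 and
# 194979 equal the sum of the fifth powers of their digits; their total is 443839.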
if __name__ == "__main__":
print(solution())
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from sklearn.metrics import fa_score
import datasets
SCREAMING_SNAKE_CASE__ : Optional[int] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
SCREAMING_SNAKE_CASE__ : Dict = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights. Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
SCREAMING_SNAKE_CASE__ : Optional[int] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase__ (datasets.Metric ):
'''simple docstring'''
def _lowercase ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=1 , UpperCamelCase__="binary" , UpperCamelCase__=None ) -> Union[str, Any]:
lowerCamelCase : Dict = fa_score(
UpperCamelCase__ , UpperCamelCase__ , labels=UpperCamelCase__ , pos_label=UpperCamelCase__ , average=UpperCamelCase__ , sample_weight=UpperCamelCase__ )
return {"f1": float(UpperCamelCase__ ) if score.size == 1 else score}
def naive_pattern_search(s: str ,pattern: str ) -> list:
    pat_len = len(pattern )
    position = []
    for i in range(len(s ) - pat_len + 1 ):
        match_found = True
        for j in range(pat_len ):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i )
    return position
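# Worst case is O((n - m + 1) * m) character comparisons for text length n and
# pattern length m, e.g. searching a long run of 'A's for the pattern "AAAB".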
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
def nor_gate(input_1: int ,input_2: int ) -> int:
    return int(input_1 == input_2 == 0 )
def main() -> None:
    print("Truth Table of NOR Gate:" )
    print("| Input 1 | Input 2 | Output |" )
    print(f'''| 0 | 0 | {nor_gate(0 ,0 )} |''' )
    print(f'''| 0 | 1 | {nor_gate(0 ,1 )} |''' )
    print(f'''| 1 | 0 | {nor_gate(1 ,0 )} |''' )
    print(f'''| 1 | 1 | {nor_gate(1 ,1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        # is_decoder is assumed False here; prepare_config_and_inputs_for_decoder flips it below.
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset bunch into its feature matrix and target vector
    return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    # Load the California housing dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1)
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_folder_path, finetuning_task=None):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
from math import sqrt
def solution(limit: int = 1000000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
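    # Worked example of the integrality test used above (illustrative, not
    # part of the original module): for the classic 6 x 5 x 3 cuboid the
    # shortest surface path is sqrt((5 + 3) ** 2 + 6 ** 2) = sqrt(100) = 10,
    # an integer, so that cuboid gets counted.
    assert sqrt((5 + 3) ** 2 + 6**2).is_integer()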
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta_prelayernorm': [
'ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP',
'RobertaPreLayerNormConfig',
'RobertaPreLayerNormOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roberta_prelayernorm'] = [
'ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaPreLayerNormForCausalLM',
'RobertaPreLayerNormForMaskedLM',
'RobertaPreLayerNormForMultipleChoice',
'RobertaPreLayerNormForQuestionAnswering',
'RobertaPreLayerNormForSequenceClassification',
'RobertaPreLayerNormForTokenClassification',
'RobertaPreLayerNormModel',
'RobertaPreLayerNormPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roberta_prelayernorm'] = [
'TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaPreLayerNormForCausalLM',
'TFRobertaPreLayerNormForMaskedLM',
'TFRobertaPreLayerNormForMultipleChoice',
'TFRobertaPreLayerNormForQuestionAnswering',
'TFRobertaPreLayerNormForSequenceClassification',
'TFRobertaPreLayerNormForTokenClassification',
'TFRobertaPreLayerNormMainLayer',
'TFRobertaPreLayerNormModel',
'TFRobertaPreLayerNormPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roberta_prelayernorm'] = [
'FlaxRobertaPreLayerNormForCausalLM',
'FlaxRobertaPreLayerNormForMaskedLM',
'FlaxRobertaPreLayerNormForMultipleChoice',
'FlaxRobertaPreLayerNormForQuestionAnswering',
'FlaxRobertaPreLayerNormForSequenceClassification',
'FlaxRobertaPreLayerNormForTokenClassification',
'FlaxRobertaPreLayerNormModel',
'FlaxRobertaPreLayerNormPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    '''simple docstring'''
    mode = "sequence-classification"
    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]
        super().__init__(hparams, num_labels, self.mode)
    def forward(self, **inputs):
        return self.model(**inputs)
    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        loss = outputs[0]
        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}
    def prepare_data(self):
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()
        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples, self.tokenizer, max_length=args.max_seq_length, label_list=self.labels, output_mode=args.glue_output_mode, )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)
    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        mode = "dev" if mode == "test" else mode
        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels), batch_size=batch_size, shuffle=shuffle, )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()
        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)
        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)
        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]
        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}
        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results", f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",)
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)
    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__(self, vocab_file=None, merges_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file, merges=merges_file, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, ), bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, **kwargs, )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
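    # Worked check (illustrative, not part of the original module): for
    # f(x) = x**2 on [0, 1] the exact integral is 1/3, and the extended
    # trapezoidal rule with h = 0.1 gives 0.05 * (0 + 1) + 0.1 * 2.85 = 0.335,
    # so the printed value should be close to 0.335.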
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
SCREAMING_SNAKE_CASE__ : List[str] = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
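    # Illustrative check (not part of the original module): partition(m)
    # computes the number of integer partitions of m, so partition(5) counts
    # {5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1}.
    assert partition(5) == 7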
def solution(limit: int = 1000000) -> int:
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, limit):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
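    # Illustrative check (not part of the original module): starting at 13 the
    # chain is 13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, i.e. 10
    # terms, which matches the counting convention seeded by counters = {1: 1}.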
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
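    # Worked example (illustrative, not part of the original module): with an
    # apparent power of 100 VA and a power factor of 0.9, the power triangle
    # gives real_power(100, 0.9) == 90.0 W and
    # reactive_power(100, 0.9) == 100 * sqrt(1 - 0.81) ~= 43.59 VAR.
    assert abs(real_power(100, 0.9) - 90.0) < 1e-9
    assert abs(reactive_power(100, 0.9) - 43.58899) < 1e-3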
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
#   SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite=False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()
    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1
    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite=False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]
    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this.")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    return np.dot(vector, vector)
class SVC:
    """Support vector classifier trained by maximising Wolfe's dual."""
    def __init__(self, *, regularization: float = np.inf, kernel: str = "linear", gamma: float = 0.0) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.dot(vector1, vector2)
    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations, classes) -> None:
        self.observations = observations
        self.classes = classes
        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        # constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        # constraint: self.C >= ln >= 0
        # and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)
        def to_minimize(candidate: ndarray) -> float:
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)
        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)
        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]).x
        self.optimum = l_star
        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes)))
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
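    # Minimal usage sketch (illustrative, not part of the original module):
    # fit a linear SVC on a tiny, made-up, linearly separable set and classify
    # a new point; it is expected (though not guaranteed by the optimizer) to
    # print 1.
    xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]), np.asarray([3.0, 1.0]), np.asarray([3.0, 2.0])]
    ys = np.asarray([-1, -1, 1, 1])
    svc = SVC(kernel="linear", regularization=10)
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([2.5, 1.5])))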
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
return series
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Dict = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
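    # Illustrative check (not part of the original module):
    # harmonic_series('5') -> ['1', '1/2', '1/3', '1/4', '1/5'].
    assert harmonic_series('5') == ['1', '1/2', '1/3', '1/4', '1/5']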
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    '''simple docstring'''
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}
    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
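    # Formatting sketch (illustrative, not part of the original module): each
    # story dict becomes one Markdown list item, e.g.
    #   '* [{title}]({url})'.format(title='Example', url='https://example.com')
    # yields '* [Example](https://example.com)'. Note that the functions above
    # need network access to the Hacker News Firebase API.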
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
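    # Illustrative checks (not part of the original module): search() is a
    # recursive two-ended *linear* scan that compares the current left and
    # right elements on each call, so the input list need not be sorted.
    assert search([1, 2, 3, 4, 5], 4) == 3
    assert search([5, 2, 1], 6) == -1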
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class Blip2VisionConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip_2_vision_model"
    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip_2_qformer"
    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    '''simple docstring'''
    model_type = "blip-2"
    is_composition = True
    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs, )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=125, additional_special_tokens=None, **kwargs) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens")
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits
        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added.")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens
    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id
    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token
    def convert_tokens_to_string(self, tokens):
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000) -> bool:
    """Probabilistic Miller-Rabin primality test with ``prec`` random rounds."""
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int for bin_exp_mod
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[int] = abs(int(input('Enter bound : ').strip()))
print('Here\'s the list of primes:')
print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 48
| 1
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = """sequence-classification"""
def __init__( self , UpperCamelCase__ ) -> List[Any]:
if type(UpperCamelCase__ ) == dict:
lowerCamelCase : int = Namespace(**UpperCamelCase__ )
lowerCamelCase : str = glue_output_modes[hparams.task]
lowerCamelCase : int = glue_tasks_num_labels[hparams.task]
super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode )
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
return self.model(**UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Optional[int] = self(**UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = outputs[0]
lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"]
lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowercase ( self ) -> str:
lowerCamelCase : Any = self.hparams
lowerCamelCase : Union[str, Any] = processors[args.task]()
lowerCamelCase : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowerCamelCase : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
lowerCamelCase : Dict = convert_examples_to_features(
UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , UpperCamelCase__ )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
lowerCamelCase : str = "dev" if mode == "test" else mode
lowerCamelCase : int = self._feature_file(UpperCamelCase__ )
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
lowerCamelCase : str = torch.load(UpperCamelCase__ )
lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
lowerCamelCase : Dict = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : Tuple = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Dict = self(**UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Any = outputs[:2]
lowerCamelCase : Union[str, Any] = logits.detach().cpu().numpy()
lowerCamelCase : Optional[Any] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self , UpperCamelCase__ ) -> tuple:
lowerCamelCase : Union[str, Any] = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item()
lowerCamelCase : Optional[int] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : str = np.squeeze(UpperCamelCase__ )
lowerCamelCase : List[Any] = np.concatenate([x["target"] for x in outputs] , axis=0 )
lowerCamelCase : List[str] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
lowerCamelCase : Dict = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase__ , UpperCamelCase__ )}
lowerCamelCase : List[str] = dict(results.items() )
lowerCamelCase : Optional[int] = results
return ret, preds_list, out_label_list
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self , UpperCamelCase__ ) -> dict:
lowerCamelCase , lowerCamelCase , lowerCamelCase : str = self._eval_end(UpperCamelCase__ )
lowerCamelCase : str = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
    def add_model_specific_args(parser, root_dir):
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--task" , default="" , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The GLUE task to run" , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
def main():
lowerCamelCase : int = argparse.ArgumentParser()
add_generic_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = GLUETransformer.add_model_specific_args(_SCREAMING_SNAKE_CASE ,os.getcwd() )
lowerCamelCase : str = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
lowerCamelCase : int = os.path.join(
"./results" ,f'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''' ,)
os.makedirs(args.output_dir )
lowerCamelCase : int = GLUETransformer(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = generic_train(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
lowerCamelCase : Optional[int] = sorted(glob.glob(os.path.join(args.output_dir ,"checkpoint-epoch=*.ckpt" ) ,recursive=_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Tuple = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
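# Illustrative invocation (script filename hypothetical; `--task`,
# `--max_seq_length` and `--gpus` are registered in `add_model_specific_args`
# above, while the data/output/prediction flags come from the generic helpers):
#   python run_pl_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --max_seq_length 128 --gpus 1 --do_predict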
| 48
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
    def vocab_size(self):
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs):
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
    def convert_tokens_to_string(self, tokens):
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
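# Minimal usage sketch (illustrative; this class corresponds to
# `XLNetTokenizer` in `transformers`, and the checkpoint name is one of the
# two entries in PRETRAINED_VOCAB_FILES_MAP above):
#   from transformers import XLNetTokenizer
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   ids = tokenizer("Hello world").input_ids  # note: this tokenizer pads on the left
#   text = tokenizer.decode(ids, skip_special_tokens=True)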
| 48
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
SCREAMING_SNAKE_CASE__ : List[str] = 16
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 32
def get_dataloaders(accelerator, batch_size=16):
lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
lowerCamelCase : Tuple = load_dataset("glue" ,"mrpc" )
    def tokenize_function(examples):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase : Dict = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=_SCREAMING_SNAKE_CASE ,max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowerCamelCase : str = datasets.map(
_SCREAMING_SNAKE_CASE ,batched=_SCREAMING_SNAKE_CASE ,remove_columns=["idx", "sentence1", "sentence2"] ,)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase : Tuple = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(_SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowerCamelCase : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowerCamelCase : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
lowerCamelCase : Tuple = 8
else:
lowerCamelCase : Dict = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE ,padding="longest" ,max_length=_SCREAMING_SNAKE_CASE ,pad_to_multiple_of=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ,)
# Instantiate dataloaders.
lowerCamelCase : int = DataLoader(
tokenized_datasets["train"] ,shuffle=_SCREAMING_SNAKE_CASE ,collate_fn=_SCREAMING_SNAKE_CASE ,batch_size=_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = DataLoader(
tokenized_datasets["validation"] ,shuffle=_SCREAMING_SNAKE_CASE ,collate_fn=_SCREAMING_SNAKE_CASE ,batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" ,_SCREAMING_SNAKE_CASE ) == "1":
lowerCamelCase : Any = 2
# New Code #
lowerCamelCase : List[Any] = int(args.gradient_accumulation_steps )
lowerCamelCase : List[str] = int(args.local_sgd_steps )
# Initialize accelerator
lowerCamelCase : Any = Accelerator(
cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=_SCREAMING_SNAKE_CASE )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase : List[str] = config["lr"]
lowerCamelCase : int = int(config["num_epochs"] )
lowerCamelCase : Any = int(config["seed"] )
lowerCamelCase : int = int(config["batch_size"] )
lowerCamelCase : Any = evaluate.load("glue" ,"mrpc" )
set_seed(_SCREAMING_SNAKE_CASE )
lowerCamelCase , lowerCamelCase : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
lowerCamelCase : Dict = AdamW(params=model.parameters() ,lr=_SCREAMING_SNAKE_CASE )
# Instantiate scheduler
lowerCamelCase : Any = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE ,num_warmup_steps=100 ,num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) ,)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = accelerator.prepare(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
with LocalSGD(
accelerator=_SCREAMING_SNAKE_CASE ,model=_SCREAMING_SNAKE_CASE ,local_sgd_steps=_SCREAMING_SNAKE_CASE ,enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
lowerCamelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = output.loss
accelerator.backward(_SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = outputs.logits.argmax(dim=-1 )
lowerCamelCase , lowerCamelCase : str = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE ,references=_SCREAMING_SNAKE_CASE ,)
lowerCamelCase : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' ,_SCREAMING_SNAKE_CASE )
def main():
lowerCamelCase : str = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" ,type=_SCREAMING_SNAKE_CASE ,default=_SCREAMING_SNAKE_CASE ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." ,)
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" ,type=_SCREAMING_SNAKE_CASE ,default=1 ,help="The number of minibatches to be ran before gradients are accumulated." ,)
parser.add_argument(
"--local_sgd_steps" ,type=_SCREAMING_SNAKE_CASE ,default=8 ,help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
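# Example launch (illustrative script name; the flags are the ones defined in
# the parser above, and `accelerate launch` supplies the distributed setup):
#   accelerate launch local_sgd_example.py --mixed_precision fp16 \
#       --gradient_accumulation_steps 2 --local_sgd_steps 8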
| 48
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
    'b0': efficientnet.EfficientNetB0,
    'b1': efficientnet.EfficientNetB1,
    'b2': efficientnet.EfficientNetB2,
    'b3': efficientnet.EfficientNetB3,
    'b4': efficientnet.EfficientNetB4,
    'b5': efficientnet.EfficientNetB5,
    'b6': efficientnet.EfficientNetB6,
    'b7': efficientnet.EfficientNetB7,
}
SCREAMING_SNAKE_CASE__ : Any = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
lowerCamelCase : int = EfficientNetConfig()
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"]
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"]
lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"]
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"]
lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"]
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : Any = 1000
lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : List[str] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
return config
def prepare_img():
lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def convert_image_processor(model_name):
lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : str = EfficientNetImageProcessor(
size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,)
return preprocessor
def rename_keys(original_param_names):
lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )}
lowerCamelCase : List[Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowerCamelCase : Optional[int] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowerCamelCase : List[str] = "efficientnet." + item[1]
lowerCamelCase : int = "classifier.weight"
lowerCamelCase : Union[str, Any] = "classifier.bias"
return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
for key, value in tf_params.items():
if "normalization" in key:
continue
lowerCamelCase : Tuple = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowerCamelCase : List[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
lowerCamelCase : int = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
lowerCamelCase : List[str] = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowerCamelCase : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
lowerCamelCase : Optional[int] = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE ,weights="imagenet" ,input_tensor=_SCREAMING_SNAKE_CASE ,input_shape=_SCREAMING_SNAKE_CASE ,pooling=_SCREAMING_SNAKE_CASE ,classes=1000 ,classifier_activation="softmax" ,)
lowerCamelCase : List[Any] = original_model.trainable_variables
lowerCamelCase : Tuple = original_model.non_trainable_variables
lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowerCamelCase : List[str] = param.numpy()
lowerCamelCase : int = list(tf_params.keys() )
# Load HuggingFace model
lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowerCamelCase : Tuple = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = outputs.logits.detach().numpy()
# Original model inference
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 )
lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
lowerCamelCase : int = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
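# Example invocation (illustrative script name; flags as defined above):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model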
| 48
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE__ : int = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
}
}
SCREAMING_SNAKE_CASE__ : str = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
# Segments (not really needed)
SCREAMING_SNAKE_CASE__ : Dict = 0
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : List[str] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 4
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Dict = VOCAB_FILES_NAMES
lowerCamelCase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : List[str] = """left"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__=False , UpperCamelCase__=True , UpperCamelCase__=False , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<sep>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<cls>" , UpperCamelCase__="<mask>" , UpperCamelCase__=["<eop>", "<eod>"] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase : str = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
lowerCamelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
lowerCamelCase : Any = 3
lowerCamelCase : Optional[Any] = do_lower_case
lowerCamelCase : List[Any] = remove_space
lowerCamelCase : str = keep_accents
lowerCamelCase : List[Any] = vocab_file
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCamelCase__ )
@property
    def vocab_size(self):
return len(self.sp_model )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
lowerCamelCase : Optional[int] = self.__dict__.copy()
lowerCamelCase : Union[str, Any] = None
return state
def __setstate__( self , UpperCamelCase__ ) -> int:
lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Any = {}
lowerCamelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
    def preprocess_text(self, inputs):
if self.remove_space:
lowerCamelCase : Dict = " ".join(inputs.strip().split() )
else:
lowerCamelCase : Union[str, Any] = inputs
lowerCamelCase : Optional[Any] = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
lowerCamelCase : Optional[int] = unicodedata.normalize("NFKD" , UpperCamelCase__ )
lowerCamelCase : List[Any] = "".join([c for c in outputs if not unicodedata.combining(UpperCamelCase__ )] )
if self.do_lower_case:
lowerCamelCase : List[str] = outputs.lower()
return outputs
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[Any] = self.preprocess_text(UpperCamelCase__ )
lowerCamelCase : Dict = self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
lowerCamelCase : Dict = []
for piece in pieces:
if len(UpperCamelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
lowerCamelCase : List[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCamelCase__ , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCamelCase : Union[str, Any] = cur_pieces[1:]
else:
lowerCamelCase : Optional[int] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCamelCase__ )
else:
new_pieces.append(UpperCamelCase__ )
return new_pieces
def _lowercase ( self , UpperCamelCase__ ) -> int:
return self.sp_model.PieceToId(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
return self.sp_model.IdToPiece(UpperCamelCase__ )
    def convert_tokens_to_string(self, tokens):
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ ).replace(UpperCamelCase__ , " " ).strip()
return out_string
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = True , **UpperCamelCase__ , ) -> str:
lowerCamelCase : Optional[int] = kwargs.pop("use_source_tokenizer" , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.convert_ids_to_tokens(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCamelCase : Any = []
lowerCamelCase : Any = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
lowerCamelCase : int = []
sub_texts.append(UpperCamelCase__ )
else:
current_sub_text.append(UpperCamelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCamelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
lowerCamelCase : Union[str, Any] = "".join(UpperCamelCase__ )
lowerCamelCase : Tuple = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCamelCase : int = self.clean_up_tokenization(UpperCamelCase__ )
return clean_text
else:
return text
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is not None:
return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1, 1]
return ([0] * len(UpperCamelCase__ )) + [1, 1]
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> List[int]:
lowerCamelCase : Any = [self.sep_token_id]
lowerCamelCase : List[str] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Tuple[str]:
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase : Union[str, Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , "wb" ) as fi:
lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
| 48
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(model_type, generator_name_or_path, question_encoder_name_or_path, dest_dir, config_name_or_path=None, generator_tokenizer_name_or_path=None, question_encoder_tokenizer_name_or_path=None):
if config_name_or_path is None:
lowerCamelCase : Any = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
lowerCamelCase : Dict = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
lowerCamelCase : Any = question_encoder_name_or_path
lowerCamelCase : str = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
lowerCamelCase : List[Any] = RagConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = gen_config
lowerCamelCase : Optional[Any] = question_encoder_config
lowerCamelCase : List[Any] = model_class.from_pretrained_question_encoder_generator(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,config=_SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(_SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(_SCREAMING_SNAKE_CASE )
# Save tokenizers.
lowerCamelCase : List[str] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
lowerCamelCase : int = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Any = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Optional[Any] = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
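# Example invocation (illustrative script name and checkpoint identifiers;
# flags as defined above):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-sequence-checkpoint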
| 48
| 1
|
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b
def mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f'''mod inverse of {a!r} and {m!r} does not exist'''
        raise ValueError(msg)
    # Extended Euclidean algorithm: the invariant u1 * a + u2 * m == u3 (and
    # the analogous one for v1, v2, v3) holds on every iteration, so when the
    # loop ends u3 == gcd(a, m) == 1 and u1 is the inverse of a modulo m.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
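# Illustrative sanity checks for the two helpers above:
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert mod_inverse(7, 26) == 15  # (7 * 15) % 26 == 105 % 26 == 1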
| 48
|
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    # Real (active) power: P = S * pf
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    # Reactive power: Q = S * sqrt(1 - pf**2)
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
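# Worked example (illustrative): a load drawing 100 VA at power factor 0.9
# dissipates real_power(100, 0.9) == 90.0 W of real power and carries
# reactive_power(100, 0.9) == 100 * sqrt(1 - 0.81) ~= 43.59 VAR of reactive power.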
| 48
| 1
|
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_name = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_name, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
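# These checks are written for pytest (illustrative invocation):
#   pytest digital_image_processing/test_digital_image_processing.py -q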
| 48
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
lowerCamelCase : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
lowerCamelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
for i in range(config.num_hidden_layers ):
if base_model:
lowerCamelCase : Optional[int] = ""
else:
lowerCamelCase : List[str] = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase : List[str] = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
lowerCamelCase : Optional[int] = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase : List[Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = in_proj_bias[: config.hidden_size]
lowerCamelCase : List[str] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : List[Any] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
lowerCamelCase : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : str = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
lowerCamelCase : Union[str, Any] = DeiTConfig()
# all deit models have fine-tuned heads
lowerCamelCase : Optional[int] = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
lowerCamelCase : Dict = 1000
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : Optional[int] = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
lowerCamelCase : Dict = int(deit_name[-6:-4] )
lowerCamelCase : Optional[Any] = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : List[str] = 768
lowerCamelCase : Tuple = 12
lowerCamelCase : Optional[Any] = 3
elif deit_name[9:].startswith("small" ):
lowerCamelCase : str = 384
lowerCamelCase : Optional[Any] = 1536
lowerCamelCase : Dict = 12
lowerCamelCase : Optional[int] = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
lowerCamelCase : str = 1024
lowerCamelCase : List[str] = 4096
lowerCamelCase : Any = 24
lowerCamelCase : Dict = 16
# load original model from timm
lowerCamelCase : List[Any] = timm.create_model(_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCamelCase : Dict = timm_model.state_dict()
lowerCamelCase : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# load HuggingFace model
lowerCamelCase : Optional[Any] = DeiTForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
model.load_state_dict(_SCREAMING_SNAKE_CASE )
# Check outputs on an image, prepared by DeiTImageProcessor
lowerCamelCase : Any = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
lowerCamelCase : Union[str, Any] = DeiTImageProcessor(size=_SCREAMING_SNAKE_CASE ,crop_size=config.image_size )
lowerCamelCase : str = image_processor(images=prepare_img() ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = timm_model(_SCREAMING_SNAKE_CASE )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(_SCREAMING_SNAKE_CASE ,outputs.logits ,atol=1e-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(f'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
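# Example invocation (illustrative script name; flags as defined above):
#   python convert_deit_timm_to_pytorch.py \
#       --deit_name vit_deit_base_distilled_patch16_224 \
#       --pytorch_dump_folder_path ./deit-base-distilled-patch16-224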
| 48
| 1
|
import argparse
import os
import re
SCREAMING_SNAKE_CASE__ : List[Any] = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
SCREAMING_SNAKE_CASE__ : Tuple = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite=False):
with open(_SCREAMING_SNAKE_CASE ,"r" ,encoding="utf-8" ) as f:
lowerCamelCase : List[Any] = f.read()
lowerCamelCase : str = content.split("\n" )
lowerCamelCase : int = []
lowerCamelCase : List[Any] = 0
while line_idx < len(_SCREAMING_SNAKE_CASE ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
lowerCamelCase : Optional[int] = len(re.search(r"^(\s*)\S" ,lines[line_idx] ).groups()[0] ) + 8
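            # the + 8 offset matches the deeper indentation of the entries nested
            # inside the OrderedDict([ ... ]) literal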
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
lowerCamelCase : Optional[int] = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
lowerCamelCase : List[str] = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
lowerCamelCase : Union[str, Any] = sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _re_identifier.search(_SCREAMING_SNAKE_CASE ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(_SCREAMING_SNAKE_CASE ) )
elif "\n".join(_SCREAMING_SNAKE_CASE ) != content:
return True
def A ( _SCREAMING_SNAKE_CASE = False ) -> List[str]:
    lowerCamelCase : str = [os.path.join(_SCREAMING_SNAKE_CASE ,f ) for f in os.listdir(_SCREAMING_SNAKE_CASE ) if f.endswith(".py" )]
    lowerCamelCase : Union[str, Any] = [sort_auto_mapping(fname ,overwrite=_SCREAMING_SNAKE_CASE ) for fname in fnames]
if not overwrite and any(_SCREAMING_SNAKE_CASE ):
lowerCamelCase : str = [f for f, d in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) if d]
raise ValueError(
f'''The following files have auto mappings that need sorting: {", ".join(_SCREAMING_SNAKE_CASE )}. Run `make style` to fix'''
" this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : List[str] = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 48
|
import random
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> tuple:
lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = [], [], []
for element in data:
if element < pivot:
less.append(_SCREAMING_SNAKE_CASE )
elif element > pivot:
greater.append(_SCREAMING_SNAKE_CASE )
else:
equal.append(_SCREAMING_SNAKE_CASE )
return less, equal, greater
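# Sanity check for the three-way partition (hypothetical call, using the helper's
# name from its call site below): _partition([3, 1, 4, 1, 5], 3) returns ([1, 1], [3], [4, 5]).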
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
    # To find the median, call with index = len(items) // 2
    # (i.e. the value at that index once items is sorted).
    # Guard against an out-of-range index.
if index >= len(_SCREAMING_SNAKE_CASE ) or index < 0:
return None
lowerCamelCase : List[Any] = items[random.randint(0 ,len(_SCREAMING_SNAKE_CASE ) - 1 )]
lowerCamelCase : Dict = 0
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = _partition(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = len(_SCREAMING_SNAKE_CASE )
# index is the pivot
if m <= index < m + count:
return pivot
# must be in smaller
elif m > index:
return quick_select(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# must be in larger
else:
return quick_select(_SCREAMING_SNAKE_CASE ,index - (m + count) )
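# Minimal usage sketch (hypothetical data, using the name from the recursive
# calls above): for items = [9, 2, 7, 4, 5, 8, 1],
# quick_select(items, len(items) // 2) returns 5, the median.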
| 48
| 1
|
import argparse
import os
import re
SCREAMING_SNAKE_CASE__ : Any = 'src/transformers'
# Pattern that looks at the indentation in a line.
SCREAMING_SNAKE_CASE__ : Any = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":` and puts `key` in group 0.
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
SCREAMING_SNAKE_CASE__ : str = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
SCREAMING_SNAKE_CASE__ : int = re.compile(r'\[([^\]]+)\]')
def A ( _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowerCamelCase : List[str] = _re_indent.search(_SCREAMING_SNAKE_CASE )
return "" if search is None else search.groups()[0]
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="" ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=None ) -> Tuple:
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Any = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(_SCREAMING_SNAKE_CASE ):
index += 1
lowerCamelCase : List[Any] = ["\n".join(lines[:index] )]
else:
lowerCamelCase : str = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCamelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(_SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
if index < len(_SCREAMING_SNAKE_CASE ) - 1:
lowerCamelCase : Any = [lines[index + 1]]
index += 1
else:
lowerCamelCase : Any = []
else:
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_SCREAMING_SNAKE_CASE ) > 0:
blocks.append("\n".join(_SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_SCREAMING_SNAKE_CASE ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def A ( _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
def _inner(_SCREAMING_SNAKE_CASE ):
return key(_SCREAMING_SNAKE_CASE ).lower().replace("_" ,"" )
return _inner
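# Example: the wrapped key turns "Model_Mapping" into "modelmapping", so sorting
# ignores case and underscores.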
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
def noop(_SCREAMING_SNAKE_CASE ):
        return _SCREAMING_SNAKE_CASE
if key is None:
lowerCamelCase : List[str] = noop
# Constants are all uppercase, they go first.
lowerCamelCase : int = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCamelCase : Union[str, Any] = [obj for obj in objects if key(_SCREAMING_SNAKE_CASE )[0].isupper() and not key(_SCREAMING_SNAKE_CASE ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCamelCase : Optional[int] = [obj for obj in objects if not key(_SCREAMING_SNAKE_CASE )[0].isupper()]
lowerCamelCase : Union[str, Any] = ignore_underscore(_SCREAMING_SNAKE_CASE )
return sorted(_SCREAMING_SNAKE_CASE ,key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE ,key=_SCREAMING_SNAKE_CASE ) + sorted(_SCREAMING_SNAKE_CASE ,key=_SCREAMING_SNAKE_CASE )
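# Ordering sketch (hypothetical input): sort_objects(["load_tf_weights", "AutoModel", "MODEL_MAPPING"])
# returns ["MODEL_MAPPING", "AutoModel", "load_tf_weights"]: constants first,
# then classes, then functions.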
def A ( _SCREAMING_SNAKE_CASE ) -> List[Any]:
# This inner function sort imports between [ ].
def _replace(_SCREAMING_SNAKE_CASE ):
        lowerCamelCase : Optional[Any] = _SCREAMING_SNAKE_CASE.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
lowerCamelCase : int = [part.strip().replace("\"" ,"" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase : Optional[int] = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(_SCREAMING_SNAKE_CASE )] ) + "]"
lowerCamelCase : List[str] = import_statement.split("\n" )
if len(_SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCamelCase : List[str] = 2 if lines[1].strip() == "[" else 1
        lowerCamelCase : Union[str, Any] = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        lowerCamelCase : str = sort_objects(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _SCREAMING_SNAKE_CASE[1] )
lowerCamelCase : str = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCamelCase : Optional[int] = _re_bracket_content.sub(_replace ,lines[1] )
else:
lowerCamelCase : List[str] = [part.strip().replace("\"" ,"" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCamelCase : Any = keys[:-1]
lowerCamelCase : Any = get_indent(lines[1] ) + ", ".join([f'''"{k}"''' for k in sort_objects(_SCREAMING_SNAKE_CASE )] )
return "\n".join(_SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
lowerCamelCase : Dict = _re_bracket_content.sub(_replace ,_SCREAMING_SNAKE_CASE )
return import_statement
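# One-line example (hypothetical statement): sort_objects_in_import('"models.bert": ["load_x", "BertModel", "MAP"]')
# returns '"models.bert": ["MAP", "BertModel", "load_x"]'.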
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
with open(_SCREAMING_SNAKE_CASE ,encoding="utf-8" ) as f:
lowerCamelCase : Union[str, Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCamelCase : Optional[int] = split_code_in_indented_blocks(
_SCREAMING_SNAKE_CASE ,start_prompt="_import_structure = {" ,end_prompt="if TYPE_CHECKING:" )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 ,len(_SCREAMING_SNAKE_CASE ) - 1 ):
        # Check if the block contains some `_import_structure` entries to sort.
lowerCamelCase : int = main_blocks[block_idx]
lowerCamelCase : Union[str, Any] = block.split("\n" )
# Get to the start of the imports.
lowerCamelCase : List[str] = 0
while line_idx < len(_SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCamelCase : int = len(_SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(_SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCamelCase : Optional[int] = "\n".join(block_lines[line_idx:-1] )
lowerCamelCase : Optional[int] = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
lowerCamelCase : Dict = split_code_in_indented_blocks(_SCREAMING_SNAKE_CASE ,indent_level=_SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCamelCase : Union[str, Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        lowerCamelCase : Optional[Any] = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCamelCase : Optional[Any] = [(i, key) for i, key in enumerate(_SCREAMING_SNAKE_CASE ) if key is not None]
        lowerCamelCase : str = [x[0] for x in sorted(_SCREAMING_SNAKE_CASE ,key=lambda _SCREAMING_SNAKE_CASE : _SCREAMING_SNAKE_CASE[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCamelCase : List[Any] = 0
lowerCamelCase : Optional[Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
            else:
                lowerCamelCase : Union[str, Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(_SCREAMING_SNAKE_CASE )
                count += 1
        # And we put our main block back together with its first and last line.
        lowerCamelCase : Dict = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(_SCREAMING_SNAKE_CASE ,"w" ,encoding="utf-8" ) as f:
f.write("\n".join(_SCREAMING_SNAKE_CASE ) )
def A ( _SCREAMING_SNAKE_CASE=True ) -> Any:
lowerCamelCase : Optional[Any] = []
for root, _, files in os.walk(_SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
lowerCamelCase : Any = sort_imports(os.path.join(_SCREAMING_SNAKE_CASE ,"__init__.py" ) ,check_only=_SCREAMING_SNAKE_CASE )
if result:
lowerCamelCase : Optional[Any] = [os.path.join(_SCREAMING_SNAKE_CASE ,"__init__.py" )]
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ValueError(f'''Would overwrite {len(_SCREAMING_SNAKE_CASE )} files, run `make style`.''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 48
|
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
return x if y == 0 else greatest_common_divisor(_SCREAMING_SNAKE_CASE ,x % y )
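# Worked example (reading the obfuscated first argument as y): the Euclidean
# recursion gives greatest_common_divisor(48, 18) -> (18, 12) -> (12, 6) -> (6, 0) -> 6.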
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
return (x * y) // greatest_common_divisor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def A ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
lowerCamelCase : List[Any] = 1
for i in range(1 ,n + 1 ):
lowerCamelCase : List[str] = lcm(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return g
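# Worked example: solution(10) returns 2520, the smallest positive number evenly
# divisible by every integer from 1 through 10.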
if __name__ == "__main__":
print(f'''{solution() = }''')
| 48
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : int = UnCLIPImageVariationPipeline
lowerCamelCase_ : List[str] = IMAGE_VARIATION_PARAMS - {"""height""", """width""", """guidance_scale"""}
lowerCamelCase_ : Dict = IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase_ : Any = [
"""generator""",
"""return_dict""",
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
lowerCamelCase_ : Any = False
@property
def _lowercase ( self ) -> List[Any]:
return 32
@property
def _lowercase ( self ) -> Union[str, Any]:
return 32
@property
def _lowercase ( self ) -> Optional[Any]:
return self.time_input_dim
@property
def _lowercase ( self ) -> Optional[Any]:
return self.time_input_dim * 4
@property
def _lowercase ( self ) -> str:
return 100
@property
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _lowercase ( self ) -> str:
torch.manual_seed(0 )
lowerCamelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Dict:
torch.manual_seed(0 )
lowerCamelCase : List[str] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(UpperCamelCase__ )
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCamelCase : str = {
"clip_embeddings_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"cross_attention_dim": self.cross_attention_dim,
}
lowerCamelCase : str = UnCLIPTextProjModel(**UpperCamelCase__ )
return model
@property
def _lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCamelCase : Tuple = {
"sample_size": 32,
# RGB in channels
"in_channels": 3,
# Out channels is double in channels because predicts mean and variance
"out_channels": 6,
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": "identity",
}
lowerCamelCase : Tuple = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def _lowercase ( self ) -> Tuple:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _lowercase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCamelCase : Tuple = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _lowercase ( self ) -> Tuple:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
lowerCamelCase : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : List[Any] = self.dummy_decoder
lowerCamelCase : Union[str, Any] = self.dummy_text_proj
lowerCamelCase : Union[str, Any] = self.dummy_text_encoder
lowerCamelCase : List[str] = self.dummy_tokenizer
lowerCamelCase : Optional[Any] = self.dummy_super_res_first
lowerCamelCase : Tuple = self.dummy_super_res_last
lowerCamelCase : Union[str, Any] = UnCLIPScheduler(
variance_type="learned_range" , prediction_type="epsilon" , num_train_timesteps=1000 , )
lowerCamelCase : Optional[int] = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="epsilon" , num_train_timesteps=1000 , )
lowerCamelCase : Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32 )
lowerCamelCase : Any = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=0 , UpperCamelCase__=True ) -> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("mps" ):
lowerCamelCase : List[Any] = torch.manual_seed(UpperCamelCase__ )
else:
lowerCamelCase : Any = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
if pil_image:
lowerCamelCase : Tuple = input_image * 0.5 + 0.5
lowerCamelCase : Any = input_image.clamp(0 , 1 )
lowerCamelCase : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowerCamelCase : Union[str, Any] = DiffusionPipeline.numpy_to_pil(UpperCamelCase__ )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _lowercase ( self ) -> Tuple:
lowerCamelCase : List[str] = "cpu"
lowerCamelCase : List[str] = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase : Any = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase : str = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
lowerCamelCase : Optional[int] = pipe(**UpperCamelCase__ )
lowerCamelCase : Tuple = output.images
lowerCamelCase : str = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
lowerCamelCase : Optional[int] = pipe(
**UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
lowerCamelCase : List[Any] = image[0, -3:, -3:, -1]
lowerCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase : Union[str, Any] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ) -> int:
lowerCamelCase : int = "cpu"
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase : List[Any] = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase : int = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
lowerCamelCase : Any = pipe(**UpperCamelCase__ )
lowerCamelCase : List[str] = output.images
lowerCamelCase : Dict = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
lowerCamelCase : str = pipe(
**UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
lowerCamelCase : int = image[0, -3:, -3:, -1]
lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase : Dict = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = "cpu"
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Optional[int] = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase : Any = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase : List[Any] = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
lowerCamelCase : List[Any] = [
pipeline_inputs["image"],
pipeline_inputs["image"],
]
lowerCamelCase : Tuple = pipe(**UpperCamelCase__ )
lowerCamelCase : str = output.images
lowerCamelCase : Tuple = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = [
tuple_pipeline_inputs["image"],
tuple_pipeline_inputs["image"],
]
lowerCamelCase : Optional[int] = pipe(
**UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCamelCase : int = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Optional[Any] = torch.device("cpu" )
class UpperCamelCase__ :
'''simple docstring'''
lowerCamelCase_ : Any = 1
lowerCamelCase : Dict = self.get_dummy_components()
lowerCamelCase : List[str] = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase : Dict = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
lowerCamelCase : Dict = pipe.decoder.dtype
lowerCamelCase : str = 1
lowerCamelCase : str = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCamelCase : Optional[Any] = pipe.prepare_latents(
UpperCamelCase__ , dtype=UpperCamelCase__ , device=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , scheduler=DummyScheduler() )
lowerCamelCase : Union[str, Any] = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCamelCase : Tuple = pipe.prepare_latents(
UpperCamelCase__ , dtype=UpperCamelCase__ , device=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , scheduler=DummyScheduler() )
lowerCamelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
lowerCamelCase : Dict = pipe(
**UpperCamelCase__ , decoder_latents=UpperCamelCase__ , super_res_latents=UpperCamelCase__ ).images
lowerCamelCase : Any = self.get_dummy_inputs(UpperCamelCase__ , pil_image=UpperCamelCase__ )
# Don't pass image, instead pass embedding
lowerCamelCase : List[Any] = pipeline_inputs.pop("image" )
lowerCamelCase : int = pipe.image_encoder(UpperCamelCase__ ).image_embeds
lowerCamelCase : Dict = pipe(
**UpperCamelCase__ , decoder_latents=UpperCamelCase__ , super_res_latents=UpperCamelCase__ , image_embeddings=UpperCamelCase__ , ).images
        # make sure passing image embeddings manually gives identical results
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _lowercase ( self ) -> str:
lowerCamelCase : Union[str, Any] = torch_device == "cpu"
        # Check is relaxed because there is no torch 2.0 sliced-attention added-KV processor
lowerCamelCase : Optional[Any] = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=UpperCamelCase__ , expected_max_diff=UpperCamelCase__ )
@skip_mps
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = torch_device == "cpu"
lowerCamelCase : Tuple = True
lowerCamelCase : Optional[int] = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
self._test_inference_batch_single_identical(
test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , additional_params_copy_to_batched_inputs=UpperCamelCase__ , )
def _lowercase ( self ) -> str:
lowerCamelCase : List[Any] = [
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCamelCase : Optional[int] = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=UpperCamelCase__ , additional_params_copy_to_batched_inputs=UpperCamelCase__ , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=UpperCamelCase__ )
@skip_mps
def _lowercase ( self ) -> Dict:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _lowercase ( self ) -> Union[str, Any]:
return super().test_save_load_local()
@skip_mps
def _lowercase ( self ) -> Union[str, Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" )
lowerCamelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/unclip/karlo_v1_alpha_cat_variation_fp16.npy" )
lowerCamelCase : Optional[int] = UnCLIPImageVariationPipeline.from_pretrained(
"kakaobrain/karlo-v1-alpha-image-variations" , torch_dtype=torch.floataa )
lowerCamelCase : Tuple = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase : Tuple = pipeline(
UpperCamelCase__ , generator=UpperCamelCase__ , output_type="np" , )
lowerCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ , 15 )
| 48
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" )
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> int:
lowerCamelCase : str = tempfile.mkdtemp()
return TatoebaConverter(save_dir=UpperCamelCase__ )
@slow
def _lowercase ( self ) -> List[Any]:
self.resolver.convert_models(["heb-eng"] )
@slow
def _lowercase ( self ) -> Tuple:
lowerCamelCase , lowerCamelCase : Dict = self.resolver.write_model_card("opus-mt-he-en" , dry_run=UpperCamelCase__ )
assert mmeta["long_pair"] == "heb-eng"
| 48
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : List[str] = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
|
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
# Initialise PyTorch model
lowerCamelCase : Any = TaConfig.from_json_file(_SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCamelCase : str = TaForConditionalGeneration(_SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
load_tf_weights_in_ta(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 48
| 1
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase__ )
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
requires_backends(self , "decord" )
self.check_model_type(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None ) -> Dict:
lowerCamelCase : List[Any] = {}
if frame_sampling_rate is not None:
lowerCamelCase : Tuple = frame_sampling_rate
if num_frames is not None:
lowerCamelCase : List[str] = num_frames
lowerCamelCase : int = {}
if top_k is not None:
lowerCamelCase : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> int:
return super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=1 ) -> Dict:
if num_frames is None:
lowerCamelCase : int = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
lowerCamelCase : Union[str, Any] = BytesIO(requests.get(UpperCamelCase__ ).content )
lowerCamelCase : str = VideoReader(UpperCamelCase__ )
videoreader.seek(0 )
lowerCamelCase : List[Any] = 0
lowerCamelCase : Optional[int] = num_frames * frame_sampling_rate - 1
lowerCamelCase : Dict = np.linspace(UpperCamelCase__ , UpperCamelCase__ , num=UpperCamelCase__ , dtype=np.intaa )
lowerCamelCase : List[Any] = videoreader.get_batch(UpperCamelCase__ ).asnumpy()
lowerCamelCase : Optional[int] = list(UpperCamelCase__ )
lowerCamelCase : int = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
return model_inputs
def _lowercase ( self , UpperCamelCase__ ) -> int:
lowerCamelCase : List[Any] = self.model(**UpperCamelCase__ )
return model_outputs
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__=5 ) -> int:
if top_k > self.model.config.num_labels:
lowerCamelCase : Any = self.model.config.num_labels
if self.framework == "pt":
lowerCamelCase : Any = model_outputs.logits.softmax(-1 )[0]
lowerCamelCase , lowerCamelCase : List[Any] = probs.topk(UpperCamelCase__ )
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
lowerCamelCase : Union[str, Any] = scores.tolist()
lowerCamelCase : Dict = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase__ , UpperCamelCase__ )]
| 48
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE__ : List[Any] = {'processing_layoutxlm': ['LayoutXLMProcessor']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['LayoutXLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Dict = ['LayoutXLMTokenizerFast']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
| 1
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
SCREAMING_SNAKE_CASE__ : Any = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def A ( _SCREAMING_SNAKE_CASE ) -> Tuple:
lowerCamelCase : Optional[int] = {}
state_dict.pop("pixel_mean" ,_SCREAMING_SNAKE_CASE )
state_dict.pop("pixel_std" ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Tuple = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCamelCase : Dict = key.replace(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if re.match(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
lowerCamelCase : str = int(re.match(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ).group(2 ) )
if layer_nb == 0:
lowerCamelCase : int = key.replace("layers.0" ,"proj_in" )
elif layer_nb == 1:
lowerCamelCase : int = key.replace("layers.1" ,"layers.0" )
elif layer_nb == 2:
lowerCamelCase : Tuple = key.replace("layers.2" ,"proj_out" )
lowerCamelCase : List[Any] = value
lowerCamelCase : Tuple = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE="ybelkada/segment-anything" ) -> List[str]:
lowerCamelCase : Union[str, Any] = hf_hub_download(_SCREAMING_SNAKE_CASE ,f'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
lowerCamelCase : Optional[int] = SamConfig()
elif "sam_vit_l" in model_name:
lowerCamelCase : Optional[Any] = SamVisionConfig(
hidden_size=1024 ,num_hidden_layers=24 ,num_attention_heads=16 ,global_attn_indexes=[5, 11, 17, 23] ,)
lowerCamelCase : int = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE ,)
elif "sam_vit_h" in model_name:
lowerCamelCase : Optional[int] = SamVisionConfig(
hidden_size=1280 ,num_hidden_layers=32 ,num_attention_heads=16 ,global_attn_indexes=[7, 15, 23, 31] ,)
lowerCamelCase : int = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE ,)
lowerCamelCase : Optional[Any] = torch.load(_SCREAMING_SNAKE_CASE ,map_location="cpu" )
lowerCamelCase : List[Any] = replace_keys(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = SamImageProcessor()
lowerCamelCase : List[str] = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = SamModel(_SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = hf_model.to("cuda" )
lowerCamelCase : Dict = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw ).convert("RGB" )
lowerCamelCase : Dict = [[[400, 650]]]
lowerCamelCase : str = [[1]]
lowerCamelCase : Dict = processor(images=np.array(_SCREAMING_SNAKE_CASE ) ,return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase : str = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : Any = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579890251159668
lowerCamelCase : str = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) ,input_points=_SCREAMING_SNAKE_CASE ,input_labels=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase : Any = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712603092193604
lowerCamelCase : Any = ((75, 275, 1725, 850),)
lowerCamelCase : Any = processor(images=np.array(_SCREAMING_SNAKE_CASE ) ,input_boxes=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase : Union[str, Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686015605926514
# Test with 2 points and 1 image.
lowerCamelCase : Optional[Any] = [[[400, 650], [800, 650]]]
lowerCamelCase : Any = [[1, 1]]
lowerCamelCase : int = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) ,input_points=_SCREAMING_SNAKE_CASE ,input_labels=_SCREAMING_SNAKE_CASE ,return_tensors="pt" ).to("cuda" )
with torch.no_grad():
lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : str = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE__ : int = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 48
|
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> list:
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Union[str, Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) - pat_len + 1 ):
lowerCamelCase : Dict = True
for j in range(_SCREAMING_SNAKE_CASE ):
if s[i + j] != pattern[j]:
lowerCamelCase : Optional[int] = False
break
if match_found:
position.append(_SCREAMING_SNAKE_CASE )
return position
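# Worst case this naive scan does O((len(s) - len(pattern) + 1) * len(pattern))
# character comparisons, e.g. searching for "AAAB" inside "AAAAAAA".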
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 48
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : List[str] = {
'configuration_luke': ['LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LukeConfig'],
'tokenization_luke': ['LukeTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[str] = [
'LUKE_PRETRAINED_MODEL_ARCHIVE_LIST',
'LukeForEntityClassification',
'LukeForEntityPairClassification',
'LukeForEntitySpanClassification',
'LukeForMultipleChoice',
'LukeForQuestionAnswering',
'LukeForSequenceClassification',
'LukeForTokenClassification',
'LukeForMaskedLM',
'LukeModel',
'LukePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
| 1
|
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase__ (lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Any = PhobertTokenizer
lowerCamelCase_ : List[str] = False
def _lowercase ( self ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase : Union[str, Any] = ["T@@", "i", "I", "R@@", "r", "e@@"]
lowerCamelCase : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
lowerCamelCase : Union[str, Any] = ["#version: 0.2", "l à</w>"]
lowerCamelCase : Dict = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCamelCase__ ) )
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Union[str, Any] = "Tôi là VinAI Research"
lowerCamelCase : List[Any] = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
return input_text, output_text
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Optional[int] = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase : Any = "Tôi là VinAI Research"
lowerCamelCase : str = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
lowerCamelCase : Tuple = tokenizer.tokenize(UpperCamelCase__ )
print(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[int] = tokens + [tokenizer.unk_token]
lowerCamelCase : Any = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , UpperCamelCase__ )
| 48
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def A ( _SCREAMING_SNAKE_CASE ) -> tuple:
return (data["data"], data["target"])
def A ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> np.ndarray:
lowerCamelCase : List[str] = XGBRegressor(verbosity=0 ,random_state=42 )
xgb.fit(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Predict target for test data
lowerCamelCase : List[Any] = xgb.predict(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Tuple = predictions.reshape(len(_SCREAMING_SNAKE_CASE ) ,1 )
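    # reshaping to (n_samples, 1) keeps each prediction aligned row-wise with its target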
return predictions
def A ( ) -> None:
lowerCamelCase : Dict = fetch_california_housing()
lowerCamelCase , lowerCamelCase : Tuple = data_handling(_SCREAMING_SNAKE_CASE )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = train_test_split(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,test_size=0.25 ,random_state=1 )
lowerCamelCase : Any = xgboost(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' )
print(f'''Mean Square Error : {mean_squared_error(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 48
| 1
|
from math import factorial
def A ( _SCREAMING_SNAKE_CASE = 20 ) -> int:
    lowerCamelCase : List[str] = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
lowerCamelCase : Optional[Any] = n // 2
return int(factorial(_SCREAMING_SNAKE_CASE ) / (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
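# Worked example: solution(20) evaluates the central binomial coefficient
# C(40, 20) = 137846528820, the number of lattice paths across a 20 x 20 grid.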
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
SCREAMING_SNAKE_CASE__ : List[Any] = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 48
|
from math import sqrt
def A ( _SCREAMING_SNAKE_CASE = 100_0000 ) -> int:
lowerCamelCase : int = 0
lowerCamelCase : int = 0
lowerCamelCase : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 ,2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(_SCREAMING_SNAKE_CASE ,sum_shortest_sides // 2 )
- max(1 ,sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
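# The search counts cuboids a <= b <= c (c = max_cuboid_size) whose shortest
# surface path sqrt((a + b)**2 + c**2) is an integer; for each valid a + b it
# adds the number of splits with 1 <= a <= b and b <= c.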
if __name__ == "__main__":
print(f'''{solution() = }''')
| 48
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Dict = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : int = """openai-gpt"""
lowerCamelCase_ : Union[str, Any] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , UpperCamelCase__=4_0478 , UpperCamelCase__=512 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=1e-5 , UpperCamelCase__=0.02 , UpperCamelCase__="cls_index" , UpperCamelCase__=True , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=0.1 , **UpperCamelCase__ , ) -> Optional[int]:
lowerCamelCase : str = vocab_size
lowerCamelCase : str = n_positions
lowerCamelCase : List[Any] = n_embd
lowerCamelCase : int = n_layer
lowerCamelCase : int = n_head
lowerCamelCase : Tuple = afn
lowerCamelCase : List[str] = resid_pdrop
lowerCamelCase : Any = embd_pdrop
lowerCamelCase : Union[str, Any] = attn_pdrop
lowerCamelCase : Union[str, Any] = layer_norm_epsilon
lowerCamelCase : Any = initializer_range
lowerCamelCase : Any = summary_type
lowerCamelCase : int = summary_use_proj
lowerCamelCase : Union[str, Any] = summary_activation
lowerCamelCase : Union[str, Any] = summary_first_dropout
lowerCamelCase : Union[str, Any] = summary_proj_to_labels
super().__init__(**UpperCamelCase__ )
| 48
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE__ : Dict = logging.getLogger(__name__)
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = """sequence-classification"""
def __init__( self , UpperCamelCase__ ) -> List[Any]:
if type(UpperCamelCase__ ) == dict:
lowerCamelCase : int = Namespace(**UpperCamelCase__ )
lowerCamelCase : str = glue_output_modes[hparams.task]
lowerCamelCase : int = glue_tasks_num_labels[hparams.task]
super().__init__(UpperCamelCase__ , UpperCamelCase__ , self.mode )
def _lowercase ( self , **UpperCamelCase__ ) -> Tuple:
return self.model(**UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
lowerCamelCase : Union[str, Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
lowerCamelCase : List[str] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
lowerCamelCase : Optional[int] = self(**UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = outputs[0]
lowerCamelCase : str = self.trainer.lr_schedulers[0]["scheduler"]
lowerCamelCase : Optional[int] = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _lowercase ( self ) -> str:
lowerCamelCase : Any = self.hparams
lowerCamelCase : Union[str, Any] = processors[args.task]()
lowerCamelCase : Optional[int] = processor.get_labels()
for mode in ["train", "dev"]:
lowerCamelCase : Optional[Any] = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
lowerCamelCase : List[str] = (
processor.get_dev_examples(args.data_dir )
if mode == "dev"
else processor.get_train_examples(args.data_dir )
)
lowerCamelCase : Dict = convert_examples_to_features(
UpperCamelCase__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("Saving features into cached file %s" , UpperCamelCase__ )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
lowerCamelCase : str = "dev" if mode == "test" else mode
lowerCamelCase : int = self._feature_file(UpperCamelCase__ )
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
lowerCamelCase : str = torch.load(UpperCamelCase__ )
lowerCamelCase : List[str] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
lowerCamelCase : str = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
lowerCamelCase : List[str] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
lowerCamelCase : Any = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
lowerCamelCase : Union[str, Any] = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ , shuffle=UpperCamelCase__ , )
    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = (
                batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None
            )

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        # Kept for API compatibility with other examples; not populated for GLUE.
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this; defaults to 0, meaning no GPUs"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir is not provided, a timestamped folder is created in the cwd.
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on the dev set and write results to output_dir.
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)


if __name__ == "__main__":
    main()
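# Example invocation (a sketch, not part of the original script). Only
# --max_seq_length, --task, --gpus and --overwrite_cache are registered above;
# --data_dir, --model_name_or_path, --output_dir and --do_predict are assumed
# to be added by `add_generic_args` / `BaseTransformer`, and the file name
# `run_glue.py` is likewise an assumption.
#
#   python run_glue.py \
#       --task mrpc \
#       --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased \
#       --max_seq_length 128 \
#       --gpus 1 \
#       --do_predict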
def binomial_coefficient(n, r):
    """Compute C(n, r) from Pascal's triangle, one row at a time, in O(r) space."""
    c = [0 for i in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # Update the row in place, right to left, so each c[j - 1] read
        # still holds the previous row's value.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
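# Sanity check (a sketch added here, not in the original): the row-by-row DP
# above must agree with the closed-form formula C(n, r) = n! / (r! * (n - r)!).
import math

for n_check in range(11):
    for r_check in range(n_check + 1):
        expected = math.factorial(n_check) // (math.factorial(r_check) * math.factorial(n_check - r_check))
        assert binomial_coefficient(n_check, r_check) == expected, (n_check, r_check)
print("binomial_coefficient agrees with the factorial formula for all n <= 10")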
def trapezoidal_rule(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a + h, a + 2h, ... strictly below b - h."""
    x = a + h
    while x < (b - h):  # float round-off can drop the last interior point
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # lower bound of integration
    b = 1.0  # upper bound of integration
    steps = 10.0  # number of steps / resolution
    boundary = [a, b]  # boundary of integration
    y = trapezoidal_rule(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
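# Quick check (a sketch added here, not in the original): f(x) = x**2 on
# [0, 1] integrates exactly to 1/3 ≈ 0.333333. The float comparison in
# make_points can drop the last interior point, so the error need not shrink
# perfectly monotonically with `steps`.
for n_steps in (10.0, 100.0, 1000.0):
    approx = trapezoidal_rule([0.0, 1.0], n_steps)
    print(f"steps={n_steps:>7}: approx={approx:.6f}  error={abs(approx - 1 / 3):.2e}")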