"""Zeller's congruence: find the day of the week for nearly any date."""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Return the day of the week for a date given as a string in the
    mm-dd-yyyy or mm/dd/yyyy format, using Zeller's congruence.
    """

    # Days of the week for response
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m: int = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1: str = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d: int = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2: str = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y: int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c: int = int(str(y)[:2])
    k: int = int(str(y)[2:])
    t: int = int(2.6 * m - 5.39)
    u: int = int(c / 4)
    v: int = int(k / 4)
    x: int = int(d + k)
    z: int = int(t + u + v + x)
    w: int = int(z - (2 * c))
    f: int = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response: str = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"Find out what day of the week nearly any date is or was. Enter "
"date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
)
)
parser.add_argument(
"date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
)
    args = parser.parse_args()
zeller(args.date_input)
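
# Quick sanity check: January 31, 2010 fell on a Sunday, so the function
# should report exactly that.
#     >>> zeller("01-31-2010")
#     'Your date 01-31-2010, is a Sunday!'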
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for a single input sample."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter by feeding it an impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter by feeding it an impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    phases = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(phases, -2 * pi))
    plt.show()
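
# Example of a minimal FilterType implementation (a sketch, not part of the
# original module): any object exposing ``process(sample) -> float`` satisfies
# the protocol. An identity filter echoes the impulse unchanged, so its
# frequency response plots as a flat line at 0 dB.
#
#     class IdentityFilter:
#         def process(self, sample: float) -> float:
#             return sample
#
#     show_frequency_response(IdentityFilter(), samplerate=48000)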
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of byte-range partitions."""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
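
# Worked example: 16647 bytes over 4 partitions gives 16647 // 4 = 4161 bytes
# per slot, with the last slot absorbing the remainder.
#     >>> allocation_num(16647, 4)
#     ['1-4161', '4162-8322', '8323-12483', '12484-16647']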
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """
    Find the area of the grid whose rectangle count is closest to target.
    """
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
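
# Why triangle numbers: an a x b grid contains T(a) * T(b) axis-aligned
# rectangles, where T(n) = n * (n + 1) / 2. A small cross-check (a sketch):
#
#     def rectangle_count(a: int, b: int) -> int:
#         return (a * (a + 1) // 2) * (b * (b + 1) // 2)
#
#     assert rectangle_count(3, 2) == 18  # 3x2 grid -> 18 rectangles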
"""simple docstring"""
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
    doctest.testmod()
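
# Usage sketch: two 2-ohm resistors halve the resistance in parallel and
# double it in series.
#     >>> resistor_parallel([2, 2])
#     1.0
#     >>> resistor_series([2, 2])
#     4.0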
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
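
# The pattern above defers heavy framework imports until a symbol is first
# accessed. A minimal sketch of the same idea using module-level __getattr__
# (PEP 562), independent of transformers' _LazyModule helper:
#
#     import importlib
#
#     _LAZY = {"LongformerModel": ".modeling_longformer"}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             return getattr(importlib.import_module(_LAZY[name], __package__), name)
#         raise AttributeError(name)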
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
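
# Example invocation (a sketch; the script filename and both paths are
# placeholders, not fixed by this file):
#
#     python convert_diffusers_to_sd.py \
#         --model_path ./my-diffusers-model \
#         --checkpoint_path ./model.ckpt --half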
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
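
# To run just the fast test above (a sketch; the module path depends on where
# this file lives in the diffusers test tree):
#
#     pytest test_kandinsky_controlnet_img2img.py -k "FastTests"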
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names.
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
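
# Usage sketch: deduplicate shared weights in an exported ONNX file. The
# optimized copy is written next to the input with an "optimized_" prefix and
# its path is returned.
#
#     new_path = remove_dup_initializers("model.onnx")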
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs an ImageGPT image processor: images are resized, normalized to
    [-1, 1], and color-quantized into sequences of cluster indices.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
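
# Usage sketch (assumes a pretrained checkpoint that ships the color clusters
# in its preprocessor config, e.g. an ImageGPT model on the Hub):
#
#     from transformers import ImageGPTImageProcessor
#
#     processor = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
#     encoding = processor(images=pil_image, return_tensors="pt")
#     input_ids = encoding.input_ids  # flattened color-cluster indices per image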
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase_ )
class __lowercase :
"""simple docstring"""
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
| 671 | 0 |
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
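# --- Illustrative sketch (added for clarity): the Nystrom attention approximation that
# gives the model above its name. This is a simplified standalone version, not the
# library implementation; the landmark count and the exact pseudo-inverse are assumptions.
def _nystrom_attention_sketch(q, k, v, num_landmarks=8):
    import math

    # q, k, v: (seq_len, head_dim) torch tensors; seq_len must be divisible by num_landmarks.
    seq_len, head_dim = q.shape
    scale = 1.0 / math.sqrt(head_dim)
    # Landmarks: means over contiguous segments of the query/key rows.
    q_landmarks = q.reshape(num_landmarks, seq_len // num_landmarks, head_dim).mean(dim=1)
    k_landmarks = k.reshape(num_landmarks, seq_len // num_landmarks, head_dim).mean(dim=1)
    # Three small softmax kernels instead of the full (seq_len x seq_len) attention matrix.
    kernel_1 = torch.softmax(q @ k_landmarks.T * scale, dim=-1)            # (n, m)
    kernel_2 = torch.softmax(q_landmarks @ k_landmarks.T * scale, dim=-1)  # (m, m)
    kernel_3 = torch.softmax(q_landmarks @ k.T * scale, dim=-1)            # (m, n)
    # softmax(QK^T) V is approximated by kernel_1 @ pinv(kernel_2) @ (kernel_3 @ V).
    return kernel_1 @ torch.linalg.pinv(kernel_2) @ (kernel_3 @ v)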
| 610 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True
    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
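# --- Illustrative note (added for clarity): the assertions above encode DistilBERT's
# single- and pair-sequence layouts, which follow the BERT convention:
#   single:  [CLS] tokens_a [SEP]
#   pair:    [CLS] tokens_a [SEP] tokens_b [SEP]
# A minimal standalone check, assuming network access to the same checkpoint:
def _example_special_token_layout():
    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    ids = tokenizer("sequence builders", "multi-sequence build")["input_ids"]
    # decode() makes the [CLS]/[SEP] placement visible.
    return tokenizer.decode(ids)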
| 671 | 0 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__UpperCamelCase : Dict = """hf-internal-testing/tiny-random-bert"""
__UpperCamelCase : List[Any] = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
__UpperCamelCase : Any = """9b8c223d42b2188cb49d29af482996f9d0f3e5a6"""
class GetFromCacheTests(unittest.TestCase):
    def test_cached_file(self):
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR))
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR, subfolder)))
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", main_commit, CONFIG_NAME))
        self.assertTrue(os.path.isfile(archive_file))

        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT, CONFIG_NAME)
        self.assertEqual(archive_file, new_archive_file)

        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT, CONFIG_NAME, revision="9b8c223")
        self.assertEqual(archive_file, os.path.join(CACHE_DIR, "snapshots", FULL_COMMIT_HASH, CONFIG_NAME))

    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This checks that we did call the fake head request
            mock_head.assert_called()

    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))

    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist.
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)

    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
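# --- Illustrative sketch (added for clarity): the on-disk cache layout exercised by the
# tests above. Resolving the same file twice returns the same snapshot path.
def _example_cached_file_usage():
    # blobs/               content-addressed file data
    # refs/main            text file holding the commit hash the "main" ref points to
    # snapshots/<commit>/  view of the repository files at that commit
    resolved = cached_file("hf-internal-testing/tiny-random-bert", CONFIG_NAME)
    return resolved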
| 519 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    failures = {}
    failure = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            failure = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[failure] = line
            in_error = False

    return failures
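# --- Illustrative example (added for clarity): the pytest `failures_short` layout the
# parser above expects. The sample below is hypothetical but follows the real format:
# a "_ [doctest] <test name> _" header line, then digit-prefixed source lines, then the
# first un-numbered error line, which is what gets captured.
_EXAMPLE_FAILURES_SHORT = (
    "_ [doctest] transformers.models.bert.modeling_bert.BertModel.forward _\n"
    "1035     Example:\n"
    "UNEXPECTED EXCEPTION: OSError('Connection error')\n"
)
# extract_first_line_failure(_EXAMPLE_FAILURES_SHORT) maps the test name to the
# "UNEXPECTED EXCEPTION: ..." line.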
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # `payload` is already a list of blocks here, so it can be embedded directly.
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase : Tuple = get_job_links()
lowerCAmelCase : Optional[Any] = retrieve_available_artifacts()
lowerCAmelCase : Any = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase : int = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""")
lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""])
lowerCAmelCase : List[str] = failed
lowerCAmelCase : Any = success
lowerCAmelCase : Dict = time_spent[1:-1] + """, """
lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""")
lowerCAmelCase : str = line.split()[0].replace("""\n""", """""")
if "::" in line:
lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""")
else:
lowerCAmelCase , lowerCAmelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A"""
lowerCAmelCase : Any = failure
break
lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 671 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ : Optional[Any] = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : Optional[Any] = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ : List[str] = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
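# --- Illustrative sketch (added for clarity): the `_LazyModule` registered at the bottom
# of this file defers the heavy framework imports until an attribute is first accessed.
# A minimal stand-in using PEP 562 module-level __getattr__ looks roughly like this
# (a simplified assumption, not the actual `_LazyModule` implementation):
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, exported in _import_structure.items():
#             if name in exported:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
#
# so `from transformers.models.mbart import MBartModel` only pays the torch import cost
# when `MBartModel` is actually resolved.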
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 31 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be run before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
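# --- Illustrative sketch (added for clarity): what `accelerator.accumulate(model)` does
# conceptually. This manual loop is a simplified assumption, not Accelerate's internals:
# it scales the loss and only steps the optimizer every `accumulation_steps` batches.
def _manual_gradient_accumulation_sketch(model, optimizer, dataloader, accumulation_steps=4):
    model.train()
    for step, batch in enumerate(dataloader):
        loss = model(**batch).loss / accumulation_steps  # average over the virtual batch
        loss.backward()  # gradients accumulate in each parameter's .grad
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()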
| 671 | 0 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
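# --- Illustrative invocation (added for clarity), assuming a training script that
# defines the `_mp_fn(index)` entry point expected by `xmp.spawn` above; the script
# name and its arguments are placeholders:
#
#     python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...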
| 575 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance (in meters) between two points on Earth,
    using reduced latitudes to correct for the planet's flattening.
    """
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
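# --- Illustrative usage (added for clarity); the coordinates below are assumptions
# chosen for demonstration, and the returned distance is in meters (RADIUS is in meters).
def _example_haversine():
    # San Francisco (37.774856, -122.424227) to Yosemite (37.864742, -119.537521)
    return haversine_distance(37.774856, -122.424227, 37.864742, -119.537521)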
| 671 | 0 |
'''simple docstring'''
def sum_of_divisors(input_num: int) -> int:
    """Return the sum of the proper divisors of a positive integer."""
    if not isinstance(input_num, int):
        raise ValueError("Input must be an integer")
    if input_num <= 0:
        raise ValueError("Input must be positive")
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
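# --- Worked example (added for clarity): 6 is a "perfect" number because the sum of its
# proper divisors equals itself, 1 + 2 + 3 = 6, so sum_of_divisors(6) == 6.
def _example_perfect_number_check(n: int = 6) -> bool:
    return sum_of_divisors(n) == n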
| 168 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
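# --- Illustrative invocation (added for clarity); the script name and file paths are
# placeholders:
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin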
| 671 | 0 |
import os
def solution():
    """Return the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 417 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)

        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
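# --- Worked note (added for clarity): on the number spiral's diagonals, the ring with
# side length j + 2 has corners (j+2)^2, (j+2)^2 - (j+1), (j+2)^2 - 2(j+1) and
# (j+2)^2 - 3(j+1). Since (j+2)^2 - 3(j+1) = j*j + j + 1, the expression
# `range(j * j + j + 1, (j + 2) * (j + 2), j + 1)` yields exactly the three non-square
# corners: for j = 3 it gives 13, 17, 21 (25 is a perfect square and never prime).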
| 671 | 0 |
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, size=None, do_resize=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], num_labels=10, do_reduce_labels=False, ignore_index=255, repo_path="shi-labs/oneformer_demo", class_info_file="ade20k_panoptic.json", num_text=10):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to the image
        processor, assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
snake_case__ = image_processing_class
    def setUp( self ):
        self.image_processing_tester = OneFormerImageProcessorTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processing_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : List[Any] ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "image_mean" ) )
        self.assertTrue(hasattr(image_processor , "image_std" ) )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
        self.assertTrue(hasattr(image_processor , "ignore_index" ) )
        self.assertTrue(hasattr(image_processor , "class_info_file" ) )
        self.assertTrue(hasattr(image_processor , "num_text" ) )
        self.assertTrue(hasattr(image_processor , "repo_path" ) )
        self.assertTrue(hasattr(image_processor , "metadata" ) )
        self.assertTrue(hasattr(image_processor , "do_reduce_labels" ) )
def lowerCamelCase__ ( self : Optional[Any] ):
pass
def lowerCamelCase__ ( self : Tuple ):
# Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ["semantic"] * len(image_inputs ) , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def lowerCamelCase__ ( self : List[str] ):
# Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ["semantic"] * len(image_inputs ) , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
def lowerCamelCase__ ( self : List[Any] ):
# Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height , expected_width = self.image_processing_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processor(
            image_inputs , ["semantic"] * len(image_inputs ) , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def comm_get_image_processor_inputs( self , with_segmentation_maps=False , is_instance_map=False , segmentation_type="np" ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester , equal_resolution=False )
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels ) ) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded ) )
            annotations = [
                np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
            ]
        if segmentation_type == "pil":
            annotations = [Image.fromarray(annotation ) for annotation in annotations]
        inputs = image_processor(
            image_inputs , ["semantic"] * len(image_inputs ) , annotations , return_tensors="pt" , instance_id_to_semantic_id=instance_id_to_semantic_id , pad_and_return_pixel_mask=True , )
        return inputs
def lowerCamelCase__ ( self : Dict ):
pass
def lowerCamelCase__ ( self : List[str] ):
        def common(is_instance_map=False , segmentation_type=None ):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True , is_instance_map=is_instance_map , segmentation_type=segmentation_type )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels , class_labels , text_inputs ):
                self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
                self.assertEqual(len(text_input ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True )
        common(is_instance_map=False , segmentation_type="pil" )
        common(is_instance_map=True , segmentation_type="pil" )
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : List[str] = np.zeros((20, 50) )
__lowerCamelCase : str = 1
__lowerCamelCase : Dict = 1
__lowerCamelCase : str = 1
__lowerCamelCase : Tuple = binary_mask_to_rle(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCamelCase__ ( self : str ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs )
        self.assertEqual(len(segmentation ) , self.image_processing_tester.batch_size )
        self.assertEqual(
            segmentation[0].shape , (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ) , )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        segmentation = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
        self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCamelCase__ ( self : Optional[Any] ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCamelCase__ ( self : Any ):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
        self.assertTrue(len(segmentation ) == self.image_processing_tester.batch_size )
        for el in segmentation:
            self.assertTrue("segmentation" in el )
            self.assertTrue("segments_info" in el )
            self.assertEqual(type(el["segments_info"] ) , list )
            self.assertEqual(
                el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) | 646 |
import re
# NOTE: every function in this cell was dumped with the same obfuscated name;
# the names below are reconstructed from the call sites and the separators used.
def split_input(str_ ):
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def to_simple_case(str_ ):
    string_split = split_input(str_ )
    return "".join(
        ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text , upper , separator ):
    try:
        string_split = split_input(text )
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text ):
    return to_simple_case(text )
def to_camel_case(text ):
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text , upper ):
    return to_complex_case(text , upper , "_" )
def to_kebab_case(text , upper ):
    return to_complex_case(text , upper , "-" )
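# Quick sanity checks for the converters above (function names were
# reconstructed, so treat them as best-effort labels rather than a known API):
assert to_pascal_case("hello world") == "HelloWorld"
assert to_snake_case("This is a string", upper=False) == "this_is_a_string"
assert to_kebab_case("This is a string", upper=True) == "THIS-IS-A-STRING"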
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 671 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (('''num_inference_steps''', 25),)
    def get_scheduler_config( self , **kwargs ):
        config = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
"prediction_type": "epsilon",
"thresholding": False,
"sample_max_value": 1.0,
"algorithm_type": "dpmsolver++",
"solver_type": "midpoint",
"lambda_min_clipped": -float('''inf''' ),
"variance_type": None,
}
        config.update(**kwargs )
return config
    def check_over_configs( self , time_step=0 , **config ):
        # method, parameter and local names reconstructed from the call sites
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output , new_output = sample, sample
            for t in range(time_step , time_step + scheduler.config.solver_order + 1 ):
                output = scheduler.step(residual , t , output , **kwargs ).prev_sample
                new_output = new_scheduler.step(residual , t , new_output , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __snake_case ( self : int ):
pass
    def check_over_forward( self , time_step=0 , **forward_kwargs ):
        kwargs = dict(self.forward_default_kwargs )
        num_inference_steps = kwargs.pop('''num_inference_steps''' , None )
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config )
            scheduler.set_timesteps(num_inference_steps )
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname )
                new_scheduler = scheduler_class.from_pretrained(tmpdirname )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps )
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            new_output = new_scheduler.step(residual , time_step , sample , **kwargs ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
    def full_loop( self , scheduler=None , **config ):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config )
            scheduler = scheduler_class(**scheduler_config )
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        return sample
def __snake_case ( self : Dict ):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps )
        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:] ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_574 ) < 1e-3
def __snake_case ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def __snake_case ( self : List[str] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
def __snake_case ( self : Optional[Any] ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type='''dpmsolver++''' , solver_order=order , solver_type=solver_type , )
def __snake_case ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
def __snake_case ( self : List[Any] ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
def __snake_case ( self : List[Any] ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
def __snake_case ( self : int ):
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __snake_case ( self : Any ):
        self.check_over_configs(variance_type=None )
self.check_over_configs(variance_type='''learned_range''' )
def __snake_case ( self : Optional[Any] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def __snake_case ( self : List[Any] ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_791 ) < 1e-3
    def __snake_case ( self : Union[str, Any] ):
        sample = self.full_loop(use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.2_248 ) < 1e-3
    def __snake_case ( self : List[str] ):
        sample = self.full_loop(prediction_type='''v_prediction''' )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.1_453 ) < 1e-3
    def __snake_case ( self : Optional[Any] ):
        sample = self.full_loop(prediction_type='''v_prediction''' , use_karras_sigmas=True )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.0_649 ) < 1e-3
def __snake_case ( self : Dict ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
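# Hedged usage sketch for the scheduler class under test: the config keys match
# the tester's defaults above, while the tensor shapes are arbitrary stand-ins
# for a UNet's latents and predictions.
_scheduler = DPMSolverSinglestepScheduler(
    num_train_timesteps=1_000,
    beta_schedule="linear",
    solver_order=2,
    algorithm_type="dpmsolver++",
)
_scheduler.set_timesteps(10)
_sample = torch.randn(1, 3, 8, 8)
for _t in _scheduler.timesteps:
    _model_output = torch.randn_like(_sample)  # stand-in for a model prediction
    _sample = _scheduler.step(_model_output, _t, _sample).prev_sample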
| 668 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class UperNetConfig( PretrainedConfig ):
    # class and attribute names restored to match the imports above
    model_type = '''upernet'''
    def __init__( self , backbone_config=None , hidden_size=512 , initializer_range=0.02 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=384 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config , dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
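# Short usage sketch: to_dict() above serialises the nested backbone config and
# the model type, so the config round-trips cleanly through JSON (assumes the
# transformers backbone registry is available at import time).
_config = UperNetConfig()
_config_dict = _config.to_dict()
assert _config_dict["model_type"] == "upernet"
assert isinstance(_config_dict["backbone_config"], dict)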
| 671 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only: bool = True , *args , **kwargs ):
    # names reconstructed from the call sites; note the bar must be *disabled*
    # on every non-main process, so the comparison is `!= 0`
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
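# Dependency-light sketch of the same main-process gating using a plain
# environment lookup instead of PartialState; LOCAL_RANK is the usual torchrun
# convention, but treat the variable name as an assumption here.
import os
from tqdm.auto import tqdm as _plain_tqdm
def main_process_tqdm(*args, main_process_only=True, **kwargs):
    rank = int(os.environ.get("LOCAL_RANK", "0"))
    return _plain_tqdm(*args, disable=main_process_only and rank != 0, **kwargs)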
| 121 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: List[str] = torch.nn.Linear(10 , 10)
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.optim.SGD(model.parameters() , 0.1)
SCREAMING_SNAKE_CASE_: Any = Accelerator()
SCREAMING_SNAKE_CASE_: List[str] = accelerator.prepare(lowerCAmelCase__)
try:
pickle.loads(pickle.dumps(lowerCAmelCase__))
except Exception as e:
self.fail(F"Accelerated optimizer pickling failed with {e}")
AcceleratorState._reset_state()
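# The test above guards against regressions in optimizer pickling; the same
# round-trip works for any torch object (shapes here are arbitrary):
_layer = torch.nn.Linear(4 , 2)
_restored = pickle.loads(pickle.dumps(_layer))
assert _restored.weight.shape == (2, 4)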
| 671 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowerCamelCase( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
torch.manual_seed(0)
        _lowercase : Optional[int] = UNet2DConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
_lowercase : int = PNDMScheduler(skip_prk_steps=lowerCAmelCase__)
torch.manual_seed(0)
_lowercase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0)
_lowercase : Dict = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=10_00, )
_lowercase : str = CLIPTextModel(lowerCAmelCase__)
_lowercase : Union[str, Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
_lowercase : Dict = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> int:
"""simple docstring"""
_lowercase : Optional[int] = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__)
_lowercase : List[Any] = image.cpu().permute(0, 2, 3, 1)[0]
        _lowercase : Union[str, Any] = Image.fromarray(np.uint8(lowerCAmelCase__)).convert('RGB')
if str(lowerCAmelCase__).startswith('mps'):
_lowercase : int = torch.manual_seed(lowerCAmelCase__)
else:
_lowercase : Dict = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__)
_lowercase : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : int = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : Optional[Any] = self.get_dummy_components()
        _lowercase : Tuple = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__)
_lowercase : List[str] = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
_lowercase : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__)
_lowercase : Union[str, Any] = sd_pipe(**lowerCAmelCase__).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : Optional[Any] = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : int = self.get_dummy_components()
        _lowercase : str = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__)
_lowercase : int = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
_lowercase : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__)
_lowercase : Dict = "french fries"
_lowercase : Optional[Any] = sd_pipe(**lowerCAmelCase__, negative_prompt=lowerCAmelCase__)
_lowercase : Tuple = output.images
_lowercase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : Optional[int] = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : Union[str, Any] = self.get_dummy_components()
        _lowercase : Tuple = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__)
_lowercase : Dict = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
_lowercase : List[Any] = self.get_dummy_inputs(lowerCAmelCase__)
_lowercase : Any = [inputs["prompt"]] * 2
        _lowercase : Optional[int] = np.array(inputs['image']).astype(np.float32) / 255.0
_lowercase : Any = torch.from_numpy(lowerCAmelCase__).unsqueeze(0).to(lowerCAmelCase__)
_lowercase : List[str] = image / 2 + 0.5
_lowercase : Tuple = image.permute(0, 3, 1, 2)
_lowercase : Dict = image.repeat(2, 1, 1, 1)
_lowercase : Dict = sd_pipe(**lowerCAmelCase__).images
_lowercase : str = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
_lowercase : Optional[Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowercase : int = self.get_dummy_components()
_lowercase : Tuple = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='scaled_linear')
        _lowercase : List[str] = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__)
_lowercase : Union[str, Any] = sd_pipe.to(lowerCAmelCase__)
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__)
_lowercase : str = self.get_dummy_inputs(lowerCAmelCase__)
_lowercase : Any = sd_pipe(**lowerCAmelCase__).images
_lowercase : Any = image[0, -3:, -3:, -1]
        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(','.join([str(x) for x in slice]))
assert image.shape == (1, 32, 32, 3)
_lowercase : int = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : List[str] = self.get_dummy_components()
        _lowercase : List[Any] = StableDiffusionInstructPix2PixPipeline(**lowerCAmelCase__)
_lowercase : Tuple = VaeImageProcessor(do_resize=lowerCAmelCase__, do_normalize=lowerCAmelCase__)
_lowercase : Optional[int] = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
_lowercase : Any = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__, input_image_type='pt'))[0]
_lowercase : Optional[int] = components["vae"]
_lowercase : Any = self.get_dummy_inputs_by_type(lowerCAmelCase__, input_image_type='pt')
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_lowercase : List[str] = vae.encode(inputs[image_param]).latent_dist.mode()
_lowercase : str = pipe(**lowerCAmelCase__)[0]
_lowercase : List[Any] = np.abs(out - out_latents_inputs).max()
self.assertLess(lowerCAmelCase__, 1E-4, 'passing latents as image input generate different result from passing image')
@slow
@require_torch_gpu
class _lowerCamelCase( unittest.TestCase ):
def UpperCamelCase ( self) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self, lowerCamelCase=0) -> Optional[int]:
"""simple docstring"""
_lowercase : List[str] = torch.manual_seed(lowerCAmelCase__)
_lowercase : str = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg')
_lowercase : List[Any] = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def UpperCamelCase ( self) -> int:
"""simple docstring"""
        _lowercase : Any = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase__)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
_lowercase : int = self.get_inputs()
_lowercase : Any = pipe(**lowerCAmelCase__).images
_lowercase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Any = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
        _lowercase : List[Any] = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase__)
_lowercase : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
_lowercase : Optional[Any] = self.get_inputs()
_lowercase : Optional[Any] = pipe(**lowerCAmelCase__).images
_lowercase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : Dict = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def UpperCamelCase ( self) -> int:
"""simple docstring"""
        _lowercase : str = StableDiffusionInstructPix2PixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase__)
_lowercase : Union[str, Any] = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
_lowercase : Optional[int] = self.get_inputs()
_lowercase : List[Any] = pipe(**lowerCAmelCase__).images
_lowercase : int = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
_lowercase : List[Any] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3])
assert np.abs(expected_slice - image_slice).max() < 1E-3
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : Any = 0
def callback_fn(lowerCamelCase, lowerCamelCase, lowerCamelCase) -> None:
_lowercase : Tuple = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowercase : List[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_lowercase : Optional[int] = latents[0, -3:, -3:, -1]
_lowercase : Optional[Any] = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
elif step == 2:
_lowercase : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
_lowercase : Optional[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5])
assert np.abs(latents_slice.flatten() - expected_slice).max() < 5E-2
_lowercase : Optional[int] = False
        _lowercase : int = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase__, torch_dtype=torch.float16)
_lowercase : Optional[int] = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
_lowercase : int = self.get_inputs()
pipe(**lowerCAmelCase__, callback=lowerCAmelCase__, callback_steps=1)
assert callback_fn.has_been_called
assert number_of_steps == 3
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        _lowercase : int = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase__, torch_dtype=torch.float16)
_lowercase : Any = pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing(1)
pipe.enable_sequential_cpu_offload()
_lowercase : List[str] = self.get_inputs()
_lowercase : Optional[Any] = pipe(**lowerCAmelCase__)
_lowercase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : str = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : Dict = inputs["image"].resize((5_04, 5_04))
_lowercase : Tuple = "timbrooks/instruct-pix2pix"
        _lowercase : Tuple = StableDiffusionInstructPix2PixPipeline.from_pretrained(
lowerCAmelCase__, safety_checker=lowerCAmelCase__, )
pipe.to(lowerCAmelCase__)
pipe.set_progress_bar_config(disable=lowerCAmelCase__)
pipe.enable_attention_slicing()
_lowercase : Union[str, Any] = pipe(**lowerCAmelCase__)
_lowercase : Tuple = output.images[0]
_lowercase : int = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
_lowercase : int = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-3
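# Hedged end-to-end sketch of the pipeline exercised above, reusing the same
# checkpoint and test image as the slow tests. Left commented because it
# downloads weights and assumes a CUDA device for the fp16 cast:
# pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
#     "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
# ).to("cuda")
# image = load_image(
#     "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
# )
# edited = pipe("turn him into a cyborg", image=image, num_inference_steps=10).images[0]
# edited.save("cyborg.png")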
| 89 |
from itertools import count
def solution(min_block_length: int = 50 ) -> int:
    # function, parameter and local names reconstructed from the call site below
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
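# Worked check against Project Euler 115: with a minimum block length of 3,
# F(3, 29) = 673,135 and F(3, 30) = 1,089,155, so 30 is the first row length
# whose fill count exceeds one million.
assert solution(3) == 30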
if __name__ == "__main__":
print(f'''{solution() = }''')
| 671 | 0 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054_571_817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float , area: float , distance: float ) -> dict[str, float]:
    # names reconstructed from the constants and dictionary keys in the body;
    # exactly one of the three arguments must be 0, and it is the one solved for
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 612 |
def A_ ( num ):
    # body names reconstructed; returns the largest number obtainable by
    # deleting exactly one digit from ``num``
    if not isinstance(num , int ):
        raise TypeError("only integers accepted as input" )
    else:
        num_string = str(abs(num ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 671 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowercase__ = logging.getLogger(__name__)
def main() -> None:
    # function and variable names reconstructed from usage; the structure
    # mirrors the distillation binarisation script this row was derived from
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path" , type=str , default="data/dump.txt" , help="The path to the data." )
    parser.add_argument("--tokenizer_type" , type=str , default="bert" , choices=["bert", "roberta", "gpt2"] )
    parser.add_argument("--tokenizer_name" , type=str , default="bert-base-uncased" , help="The tokenizer to use." )
    parser.add_argument("--dump_file" , type=str , default="data/dump" , help="The dump file prefix." )
    args = parser.parse_args()
    logger.info(f'''Loading Tokenizer ({args.tokenizer_name})''' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
    logger.info(f'''Loading text from {args.file_path}''' )
    with open(args.file_path , "r" , encoding="utf8" ) as fp:
        data = fp.readlines()
    logger.info("Start encoding" )
    logger.info(f'''{len(data )} examples to process.''' )
    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f'''{bos} {text.strip()} {sep}'''
        token_ids = tokenizer.encode(text , add_special_tokens=False )
        rslt.append(token_ids )
        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f'''{iter} examples processed. - {(end-start):.2f}s/{interval}expl''' )
            start = time.time()
    logger.info("Finished binarization" )
    logger.info(f'''{len(data )} examples processed.''' )
    dp_file = f'''{args.dump_file}.{args.tokenizer_name}.pickle'''
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(f'''Dump to {dp_file}''' )
    with open(dp_file , "wb" ) as handle:
        pickle.dump(rslt_ , handle , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
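# Minimal illustration of the dtype switch inside main(): token ids are stored
# as uint16 whenever the vocabulary fits in 16 bits, otherwise int32 (the vocab
# size below is illustrative).
_vocab_size = 30_522
_ids = [101, 7_592, 2_088, 102]
_arr = np.uint16(_ids ) if _vocab_size < (1 << 16) else np.int32(_ids )
assert _arr.dtype == np.uint16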
| 610 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
# NOTE: class, method and attribute names below are reconstructed from the
# test function that follows.
class Node:
    def __init__( self , data ):
        self.data = data
        self.next: Node | None = None
class CircularLinkedList:
    def __init__( self ):
        self.head = None
        self.tail = None
    def __iter__( self ):
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join(str(item ) for item in iter(self ))
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if index < 0 or index > len(self ):
            raise IndexError("list index out of range." )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node
    def delete_front( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index < len(self ):
            raise IndexError("list index out of range." )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data
    def is_empty( self ):
        return len(self ) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
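# Self-contained sketch of the traversal idea behind __iter__ above: walk a
# hand-built ring of three nodes and stop once the cursor returns to the head.
_a, _b, _c = Node(1 ), Node(2 ), Node(3 )
_a.next, _b.next, _c.next = _b, _c, _a  # close the ring
_seen, _node = [], _a
while True:
    _seen.append(_node.data )
    _node = _node.next
    if _node is _a:
        break
assert _seen == [1, 2, 3]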
| 671 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester( unittest.TestCase ):
    # NOTE: "Va" in this cell's class names is a garbled version digit from the
    # dump; other names and parameters are reconstructed from usage.
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class MobileNetVaImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self : List[Any] ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , """do_resize""" ) )
        self.assertTrue(hasattr(image_processing , """size""" ) )
        self.assertTrue(hasattr(image_processing , """do_center_crop""" ) )
        self.assertTrue(hasattr(image_processing , """crop_size""" ) )
def _snake_case ( self : List[str] ):
'''simple docstring'''
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 2_0} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
pass
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
def _snake_case ( self : int ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
def _snake_case ( self : int ):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["""height"""],
                self.image_processor_tester.crop_size["""width"""],
            ) , )
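# The resize -> center-crop arithmetic the tests above assert on, sketched with
# PIL alone: scale the shortest edge to 20, then take an 18 x 18 central crop
# (the exact rounding of the real processor may differ slightly).
_img = Image.new("RGB" , (40, 30) )  # PIL sizes are (width, height)
_scale = 20 / min(_img.size )
_img = _img.resize((round(_img.width * _scale ), round(_img.height * _scale )) )
_left, _top = (_img.width - 18) // 2, (_img.height - 18) // 2
_img = _img.crop((_left, _top, _left + 18, _top + 18) )
assert _img.size == (18, 18)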
| 519 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000 , hole_limit: int = 10 ) -> int:
    # parameter and local names reconstructed from the body and the call below
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(_UpperCAmelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= hole_limit )
if __name__ == "__main__":
print(f'''{solution() = }''')
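# Brute-force cross-check of the lamina counting above for a tiny tile budget
# (limits here are illustrative): only the 3x3 lamina with a 1x1 hole uses
# exactly 8 tiles.
_tally = defaultdict(int )
for _outer in range(3 , 20 ):
    for _hole in range(_outer - 2 , 0 , -2 ):
        _tiles = _outer * _outer - _hole * _hole
        if _tiles <= 100:
            _tally[_tiles] += 1
assert _tally[8] == 1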
| 671 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    '''simple docstring'''
    def __init__( self , parent ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs )["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    # the parameter names below are restored from the upstream mixin hook signature
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest( unittest.TestCase ):
    '''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50_000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.1205_3341, -1.026_4901, 0.2922_1946],
                    [-1.513_3783, 0.19_7433, 0.1519_0607],
                    [-5.013_5403, -3.90_0256, -0.8403_8764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest( unittest.TestCase ):
    '''simple docstring'''
    tolerance = 1e-4
    def test_basic( self ):
        input_ids = tf.constant([[4, 10]] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emb1(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emb1([2, 16, 512] )
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest( unittest.TestCase ):
    '''simple docstring'''
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings( self ):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance ) | 31 |
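# Added sketch (mine, not from the source): the rotate-half arithmetic that
# test_apply_rotary_position_embeddings exercises, written in plain NumPy and
# assuming the HF layout where the sinusoidal table holds sin in the first half
# of the last axis and cos in the second half.
import numpy as np
def apply_rotary_numpy(sinusoidal_pos: np.ndarray, layer: np.ndarray) -> np.ndarray:
    sin, cos = np.split(sinusoidal_pos, 2, axis=-1)
    # duplicate each angle so every even/odd feature pair shares one rotation
    sin_pos = np.repeat(sin, 2, axis=-1)
    cos_pos = np.repeat(cos, 2, axis=-1)
    # (x1, x2) -> (-x2, x1) on every feature pair
    rotate_half = np.stack([-layer[..., 1::2], layer[..., ::2]], axis=-1).reshape(layer.shape)
    return layer * cos_pos + rotate_half * sin_pos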
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 671 | 0 |
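# Added sketch (mine, not from the source): the lazy-import pattern above in
# miniature. The real transformers _LazyModule replaces the package module in
# sys.modules, so heavy backends such as torch or TF are imported only when one
# of their symbols is first accessed.
import importlib
import sys
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_module = {sym: mod for mod, syms in import_structure.items() for sym in syms}
    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        return getattr(submodule, attr)
# a package __init__ would then end with:
#   sys.modules[__name__] = LazyModule(__name__, _import_structure)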
"""simple docstring"""
from PIL import Image
def change_contrast( img, level ) -> Image:
    """Change the contrast of a PIL image; level ranges from -255 to 255."""
    factor : float = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
    def contrast( c ) -> int:
        return int(1_28 + factor * (c - 1_28) )
    return img.point(contrast )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
__A : int = change_contrast(img, 1_7_0)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 575 |
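# Added check (mine): the contrast factor above, worked through for the
# level of 170 used in the example. Mid-grey (128) is a fixed point; values
# away from it are pushed hard toward 0 or 255 (PIL's point() clips overshoot).
factor = (259 * (170 + 255)) / (255 * (259 - 170))
assert round(factor, 2) == 4.85
assert int(128 + factor * (128 - 128)) == 128  # mid-grey unchanged
assert int(128 + factor * (200 - 128)) == 477  # clipped to 255 when stored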
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path( graph , start , goal ):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance( graph , start , target ):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
| 671 | 0 |
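# Added usage sketch (mine): BFS explores level by level, so the first complete
# path to the goal is guaranteed shortest in edge count on an unweighted graph.
assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
assert bfs_shortest_path(demo_graph, "G", "G") == ["G"]       # start == goal
assert bfs_shortest_path(demo_graph, "G", "Z") == []          # unreachable
assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4
assert bfs_shortest_path_distance(demo_graph, "G", "Z") == -1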
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys( name ) -> str:
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb" , "model.decoder.embed_tokens" )
    if "transformer" in name:
        name = name.replace("transformer" , "model.decoder" )
    if "cross_attention" in name:
        name = name.replace("cross_attention" , "encoder_attn" )
    if "linear1" in name:
        name = name.replace("linear1" , "fc1" )
    if "linear2" in name:
        name = name.replace("linear2" , "fc2" )
    if "norm1" in name:
        name = name.replace("norm1" , "self_attn_layer_norm" )
    if "norm_cross" in name:
        name = name.replace("norm_cross" , "encoder_attn_layer_norm" )
    if "norm2" in name:
        name = name.replace("norm2" , "final_layer_norm" )
    if "out_norm" in name:
        name = name.replace("out_norm" , "model.decoder.layer_norm" )
    if "linears" in name:
        name = name.replace("linears" , "lm_heads" )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
    return name
def rename_state_dict( state_dict , hidden_size ) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight" , "q_proj.weight" )] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "k_proj.weight" )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight" , "v_proj.weight" )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj." ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint( checkpoint ) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint( checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = TaEncoderModel.from_pretrained("t5-base" )
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz" )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
    if len(unexpected_keys ) > 0:
        raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits" )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base" )
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 168 |
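# Added illustration (mine): the in_proj_weight split performed by
# rename_state_dict above, shown on a tiny tensor. Fairseq stores the q/k/v
# projections stacked along dim 0; the HF checkpoint wants three separate
# matrices, and concatenating them back recovers the original.
import torch
hidden_size = 2
in_proj_weight = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
q = in_proj_weight[:hidden_size, :]
k = in_proj_weight[hidden_size : 2 * hidden_size, :]
v = in_proj_weight[-hidden_size:, :]
assert torch.equal(torch.cat([q, k, v]), in_proj_weight)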
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
    """simple docstring"""
    def process( self , sample : float ) -> float:
        return 0.0
def get_bounds( fft_results , samplerate ):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type , samplerate ):
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()
def show_phase_response( filter_type , samplerate ):
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(fft_phase , -2 * pi ) )
    plt.show()
| 671 | 0 |
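# Added sketch (mine, not part of the source): a minimal filter satisfying the
# FilterType protocol above, so the plotting helpers can be run end to end.
class MovingAverageFilter:
    """8-tap moving average; its magnitude response is the classic sinc shape."""
    def __init__(self, taps: int = 8) -> None:
        self.history = [0.0] * taps
    def process(self, sample: float) -> float:
        self.history = self.history[1:] + [sample]
        return sum(self.history) / len(self.history)
# show_frequency_response(MovingAverageFilter(), 48_000)
# show_phase_response(MovingAverageFilter(), 48_000)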
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mluke"] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 417 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target = 2_00_00_00 ):
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
| 671 | 0 |
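# Added check (mine): the identity behind solution(). An a x b grid contains
# T(a) * T(b) rectangles, where T(n) = n*(n+1)/2, because a rectangle is fixed
# by choosing 2 of the a+1 vertical and 2 of the b+1 horizontal grid lines.
def rectangle_count(a: int, b: int) -> int:
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)
assert rectangle_count(2, 3) == 18             # the Project Euler 85 example
assert rectangle_count(36, 77) == 1_999_998    # nearest to 2e6; area 36*77 = 2772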
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class _snake_case ( unittest.TestCase ):
    def assertListAlmostEqual( self , list1 , list2 , tol ):
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_gradient_accumulator( self ):
        accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
        with self.assertRaises(ValueError ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
    def test_gradient_accumulator_distribution_strategy( self ):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU" )
        if len(physical_devices ) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
        devices = tf.config.list_logical_devices(device_type="CPU" )
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2] )
        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0] )
            optimizer, _ = create_optimizer(5E-5 , 10 , 5 )
            gradient_placeholder = tf.Variable([0.0, 0.0] , trainable=False )
        def accumulate_on_replica(gradient ):
            accumulator([gradient] )
        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
        @tf.function
        def accumulate(grad1 , grad2 ):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder )
                local_variables[0].assign(grad1 )
                local_variables[1].assign(grad2 )
                strategy.run(accumulate_on_replica , args=(gradient_placeholder,) )
        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica )
        def _check_local_values(grad1 , grad2 ):
            values = strategy.experimental_local_results(accumulator._gradients[0] )
            self.assertListAlmostEqual(values[0].value() , grad1 , tol=1E-2 )
            self.assertListAlmostEqual(values[1].value() , grad2 , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] ) | 646 |
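# Added sketch (mine, not from the source): what GradientAccumulator buys you.
# For plain SGD, applying the sum of k micro-batch gradients once is the same
# update as one step on the k-times-larger batch.
import tensorflow as tf
def accumulate_then_apply(grads_per_microbatch, variable, lr=0.1):
    total = tf.zeros_like(variable)
    for g in grads_per_microbatch:
        total += g                       # accumulate instead of applying
    variable.assign_sub(lr * total)      # one update with the summed gradient
v = tf.Variable([1.0, 1.0])
accumulate_then_apply([tf.constant([1.0, 2.0]), tf.constant([-2.0, 1.0])], v)
# v is now [1.0 - 0.1 * (-1.0), 1.0 - 0.1 * 3.0] = [1.1, 0.7]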
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 671 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 668 |
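# Added illustration (mine): the segment-id layout that
# create_token_type_ids_from_sequences produces for a BERT sentence pair:
#   [CLS] A A [SEP] B B [SEP]
#     0   0 0   0   1 1   1
token_ids_0 = [7, 8]    # hypothetical ids for sentence A
token_ids_1 = [9, 10]   # hypothetical ids for sentence B
segment_ids = [0] * (1 + len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 1)
assert segment_ids == [0, 0, 0, 0, 1, 1, 1]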
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = f'''input_blocks.{3*i + j + 1}.0.'''
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = f'''input_blocks.{3*i + j + 1}.1.'''
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = f'''output_blocks.{3*i + j}.0.'''
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = f'''output_blocks.{3*i + j}.1.'''
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = f'''input_blocks.{3*(i+1)}.0.op.'''
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = """mid_block.attentions.0."""
sd_mid_atn_prefix = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = f'''middle_block.{2*j}.'''
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict( unet_state_dict ):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = f'''encoder.down.{i}.block.{j}.'''
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
    if i < 3:
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = f'''down.{i}.downsample.'''
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''up.{3-i}.upsample.'''
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = f'''decoder.up.{3-i}.block.{j}.'''
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = f'''mid.block_{i+1}.'''
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd( w ):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape , 1 , 1 )
def convert_vae_state_dict( vae_state_dict ):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format" )
                new_state_dict[k] = reshape_weight_for_sd(v )
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"""q""": 0, """k""": 1, """v""": 2}
def convert_text_enc_state_dict_vaa(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight" )
            or k.endswith(".self_attn.k_proj.weight" )
            or k.endswith(".self_attn.v_proj.weight" )
        ):
            k_pre = k[: -len(".q_proj.weight" )]
            k_code = k[-len("q_proj.weight" )]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue
        if (
            k.endswith(".self_attn.q_proj.bias" )
            or k.endswith(".self_attn.k_proj.bias" )
            or k.endswith(".self_attn.v_proj.bias" )
        ):
            k_pre = k[: -len(".q_proj.bias" )]
            k_code = k[-len("q_proj.bias" )]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v
    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)
    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)
    return new_state_dict
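# Editor's note: a minimal, hypothetical sketch (not part of the original script) of the
# q/k/v fusion the loops above perform: the three projection matrices are stacked
# row-wise, in the q, k, v order fixed by code2idx, into CLIP's fused in_proj layout.
def _demo_qkv_fusion(hidden_size: int = 4) -> torch.Tensor:
    q, k, v = (torch.randn(hidden_size, hidden_size) for _ in range(3))
    return torch.cat([q, k, v])  # shape (3 * hidden_size, hidden_size)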
def convert_text_enc_state_dict(text_enc_dict):
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
    parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
    parser.add_argument(
        """--use_safetensors""", action="""store_true""", help="""Save weights using safetensors; default is ckpt."""
    )
    args = parser.parse_args()
    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
    # Path for safetensors
    unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
    vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
    text_enc_path = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
    # Load models from safetensors if they exist, otherwise fall back to the PyTorch .bin files
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="""cpu""")
    else:
        unet_path = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
        unet_state_dict = torch.load(unet_path, map_location="""cpu""")
    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="""cpu""")
    else:
        vae_path = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
        vae_state_dict = torch.load(vae_path, map_location="""cpu""")
    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="""cpu""")
    else:
        text_enc_path = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
        text_enc_dict = torch.load(text_enc_path, map_location="""cpu""")
    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_vaa_model = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
    if is_vaa_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_vaa(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}
    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"""state_dict""": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 671 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
_lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_lowerCamelCase : int = {
"""vocab_file""": {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""unc-nlp/lxmert-base-uncased""": (
"""https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"""
),
},
}
_lowerCamelCase : List[Any] = {
"""unc-nlp/lxmert-base-uncased""": 512,
}
_lowerCamelCase : Tuple = {
"""unc-nlp/lxmert-base-uncased""": {"""do_lower_case""": True},
}
class snake_case__ ( UpperCAmelCase_ ):
'''simple docstring'''
__A = VOCAB_FILES_NAMES
__A = PRETRAINED_VOCAB_FILES_MAP
__A = PRETRAINED_INIT_CONFIGURATION
__A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A = LxmertTokenizer
def __init__( self : Tuple , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : Any=True , lowerCAmelCase_ : Optional[int]="[UNK]" , lowerCAmelCase_ : Union[str, Any]="[SEP]" , lowerCAmelCase_ : Optional[Any]="[PAD]" , lowerCAmelCase_ : Optional[int]="[CLS]" , lowerCAmelCase_ : int="[MASK]" , lowerCAmelCase_ : Dict=True , lowerCAmelCase_ : Tuple=None , **lowerCAmelCase_ : int , ) -> Tuple:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase__ )
UpperCAmelCase_ = do_lower_case
    def UpperCamelCase ( self : int , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def UpperCamelCase ( self : List[Any] , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def UpperCamelCase ( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> List[Any]:
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
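# Editor's note: a minimal, hypothetical sketch (not part of the original file) of the
# BERT-style pair layout produced by the two methods above:
# [CLS] A [SEP] B [SEP], with token_type_ids 0 over the first segment and 1 over the second.
def _demo_special_token_layout() -> None:
    cls_id, sep_id = 101, 102  # typical BERT vocabulary ids (assumption)
    a, b = [7, 8], [9]
    ids = [cls_id] + a + [sep_id] + b + [sep_id]
    type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
    assert len(ids) == len(type_ids) == 6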
| 121 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = '''xlm-prophetnet'''
_UpperCAmelCase : Any = ['''past_key_values''']
_UpperCAmelCase : Tuple = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : str , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[Union[str, Callable]] = "gelu" , lowerCAmelCase__ : Optional[int] = 3_0522 , lowerCAmelCase__ : Optional[int] = 1024 , lowerCAmelCase__ : Optional[int] = 4096 , lowerCAmelCase__ : Optional[int] = 12 , lowerCAmelCase__ : Optional[int] = 16 , lowerCAmelCase__ : Optional[int] = 4096 , lowerCAmelCase__ : Optional[int] = 12 , lowerCAmelCase__ : Optional[int] = 16 , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[int] = 512 , lowerCAmelCase__ : Optional[float] = 0.02 , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[int] = 0 , lowerCAmelCase__ : Optional[int] = 2 , lowerCAmelCase__ : Optional[int] = 32 , lowerCAmelCase__ : Optional[int] = 128 , lowerCAmelCase__ : Optional[bool] = False , lowerCAmelCase__ : Optional[float] = 0.0 , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[int] = 0 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 2 , **lowerCAmelCase__ : List[str] , ):
SCREAMING_SNAKE_CASE_: List[Any] = vocab_size
SCREAMING_SNAKE_CASE_: int = hidden_size
SCREAMING_SNAKE_CASE_: Any = encoder_ffn_dim
SCREAMING_SNAKE_CASE_: Tuple = num_encoder_layers
SCREAMING_SNAKE_CASE_: List[Any] = num_encoder_attention_heads
SCREAMING_SNAKE_CASE_: Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE_: Any = num_decoder_layers
SCREAMING_SNAKE_CASE_: Tuple = num_decoder_attention_heads
SCREAMING_SNAKE_CASE_: str = max_position_embeddings
SCREAMING_SNAKE_CASE_: str = init_std # Normal(0, this parameter)
SCREAMING_SNAKE_CASE_: Dict = activation_function
# parameters for xlmprophetnet
SCREAMING_SNAKE_CASE_: Optional[int] = ngram
SCREAMING_SNAKE_CASE_: Tuple = num_buckets
SCREAMING_SNAKE_CASE_: Union[str, Any] = relative_max_distance
SCREAMING_SNAKE_CASE_: List[str] = disable_ngram_loss
SCREAMING_SNAKE_CASE_: Dict = eps
# 3 Types of Dropout
SCREAMING_SNAKE_CASE_: Any = attention_dropout
SCREAMING_SNAKE_CASE_: Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE_: str = dropout
SCREAMING_SNAKE_CASE_: Optional[int] = use_cache
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , add_cross_attention=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Any):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`.")
| 671 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=6, lowerCamelCase=17, lowerCamelCase=23, lowerCamelCase=11, lowerCamelCase=True, ) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = parent
_lowercase : Dict = batch_size
_lowercase : List[Any] = seq_length
_lowercase : Tuple = act_dim
_lowercase : Dict = state_dim
_lowercase : Union[str, Any] = hidden_size
_lowercase : Any = max_length
_lowercase : Optional[int] = is_training
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
_lowercase : int = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
_lowercase : int = floats_tensor((self.batch_size, self.seq_length, 1))
_lowercase : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1))
_lowercase : int = ids_tensor((self.batch_size, self.seq_length), vocab_size=10_00)
_lowercase : Optional[Any] = random_attention_mask((self.batch_size, self.seq_length))
_lowercase : Tuple = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return DecisionTransformerConfig(
batch_size=self.batch_size, seq_length=self.seq_length, act_dim=self.act_dim, state_dim=self.state_dim, hidden_size=self.hidden_size, max_length=self.max_length, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Optional[int]:
"""simple docstring"""
_lowercase : Union[str, Any] = DecisionTransformerModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
_lowercase : int = model(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)
self.parent.assertEqual(result.state_preds.shape, states.shape)
self.parent.assertEqual(result.action_preds.shape, actions.shape)
self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size))  # seq_length * 3 as there are 3 modalities: states, returns and actions
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
_lowercase : Optional[Any] = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
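# Editor's note: a minimal, hypothetical sketch (not part of the original test) of why the
# hidden-state length checked above is seq_length * 3: Decision Transformer feeds one
# (return_to_go, state, action) token triple per timestep.
def _demo_modality_interleaving(seq_length: int = 3) -> list:
    tokens = [m for _ in range(seq_length) for m in ("return", "state", "action")]
    assert len(tokens) == seq_length * 3
    return tokens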
@require_torch
class _lowerCamelCase( UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, unittest.TestCase ):
lowercase_ : Union[str, Any] = (DecisionTransformerModel,) if is_torch_available() else ()
lowercase_ : Dict = ()
lowercase_ : Optional[Any] = {'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowercase_ : Optional[int] = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowercase_ : Tuple = False
lowercase_ : Any = False
lowercase_ : Tuple = False
lowercase_ : int = False
lowercase_ : Tuple = False
lowercase_ : Optional[int] = False
lowercase_ : Union[str, Any] = False
lowercase_ : List[str] = False
lowercase_ : int = False
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = DecisionTransformerModelTester(self)
_lowercase : Optional[Any] = ConfigTester(self, config_class=lowerCAmelCase__, hidden_size=37)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
@slow
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[int] = DecisionTransformerModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = model_class(lowerCAmelCase__)
_lowercase : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Tuple = [*signature.parameters.keys()]
_lowercase : Optional[Any] = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(lowerCAmelCase__)], lowerCAmelCase__)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Any = 2 # number of steps of autoregressive prediction we will perform
_lowercase : str = 10 # defined by the RL environment, may be normalized
_lowercase : str = DecisionTransformerModel.from_pretrained('edbeeching/decision-transformer-gym-hopper-expert')
_lowercase : Optional[int] = model.to(lowerCAmelCase__)
_lowercase : Union[str, Any] = model.config
torch.manual_seed(0)
_lowercase : Union[str, Any] = torch.randn(1, 1, config.state_dim).to(device=lowerCAmelCase__, dtype=torch.floataa) # env.reset()
_lowercase : Union[str, Any] = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]], device=lowerCAmelCase__)
_lowercase : Union[str, Any] = torch.tensor(lowerCAmelCase__, device=lowerCAmelCase__, dtype=torch.floataa).reshape(1, 1, 1)
_lowercase : Any = state
_lowercase : Dict = torch.zeros(1, 0, config.act_dim, device=lowerCAmelCase__, dtype=torch.floataa)
_lowercase : str = torch.zeros(1, 0, device=lowerCAmelCase__, dtype=torch.floataa)
_lowercase : List[str] = torch.tensor(0, device=lowerCAmelCase__, dtype=torch.long).reshape(1, 1)
for step in range(lowerCAmelCase__):
_lowercase : Dict = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=lowerCAmelCase__)], dim=1)
_lowercase : Optional[Any] = torch.cat([rewards, torch.zeros(1, 1, device=lowerCAmelCase__)], dim=1)
_lowercase : str = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)
with torch.no_grad():
_lowercase : int = model(
states=lowerCAmelCase__, actions=lowerCAmelCase__, rewards=lowerCAmelCase__, returns_to_go=lowerCAmelCase__, timesteps=lowerCAmelCase__, attention_mask=lowerCAmelCase__, return_dict=lowerCAmelCase__, )
self.assertEqual(action_pred.shape, actions.shape)
self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1E-4))
_lowercase : Dict = ( # env.step(action)
torch.randn(1, 1, config.state_dim).to(device=lowerCAmelCase__, dtype=torch.floataa),
1.0,
False,
{},
)
_lowercase : Any = action_pred[0, -1]
_lowercase : Tuple = torch.cat([states, state], dim=1)
_lowercase : str = returns_to_go[0, -1] - reward
_lowercase : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
_lowercase : str = torch.cat(
[timesteps, torch.ones((1, 1), device=lowerCAmelCase__, dtype=torch.long) * (step + 1)], dim=1)
| 89 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Dict = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    aa = np.sum(np.square(a) , axis=1 )
    ba = np.sum(np.square(b) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x, clusters):
    x = x.reshape(-1 , 3 )
    d = squared_euclidean_distance(x , clusters )
    return np.argmin(d , axis=1 )
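# Editor's note: a minimal sketch (not part of the original file) checking the expansion
# ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 used above against a direct computation.
def _check_squared_euclidean_distance() -> None:
    rng = np.random.default_rng(0)
    a, b = rng.normal(size=(5, 3)), rng.normal(size=(7, 3))
    naive = ((a[:, None, :] - b[None, :, :]) ** 2).sum(axis=-1)
    assert np.allclose(squared_euclidean_distance(a, b), naive)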
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : int = ['''pixel_values''']
def __init__( self : Tuple , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ):
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = size if size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE_: Tuple = get_size_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = np.array(lowerCAmelCase__) if clusters is not None else None
SCREAMING_SNAKE_CASE_: Dict = do_resize
SCREAMING_SNAKE_CASE_: str = size
SCREAMING_SNAKE_CASE_: List[Any] = resample
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize
SCREAMING_SNAKE_CASE_: Dict = do_color_quantize
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ):
SCREAMING_SNAKE_CASE_: List[str] = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}")
return resize(
lowerCAmelCase__ , size=(size["height"], size["width"]) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , ):
SCREAMING_SNAKE_CASE_: str = rescale(image=lowerCAmelCase__ , scale=1 / 127.5 , data_format=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = image - 1
return image
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCAmelCase__ : Union[str, Any] , ):
SCREAMING_SNAKE_CASE_: Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: Dict = get_size_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: int = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE_: Tuple = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE_: Optional[int] = np.array(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True.")
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: Union[str, Any] = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: str = [self.normalize(image=lowerCAmelCase__) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE_: Any = [to_channel_dimension_format(lowerCAmelCase__ , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE_: List[Any] = np.array(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = color_quantize(lowerCAmelCase__ , lowerCAmelCase__).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE_: str = images.shape[0]
SCREAMING_SNAKE_CASE_: Tuple = images.reshape(lowerCAmelCase__ , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE_: str = list(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Dict = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
| 671 | 0 |
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 1_0_0_0_1) -> int:
    """simple docstring"""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
    if nth <= 0:
        raise ValueError("""Parameter nth must be greater than or equal to one.""" )
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]
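# Editor's note: a minimal sketch (not part of the original file) sanity-checking
# solution() on the first few primes.
def _check_solution_small() -> None:
    assert [solution(n) for n in range(1, 7)] == [2, 3, 5, 7, 11, 13]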
if __name__ == "__main__":
print(F"""{solution() = }""")
| 612 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
lowerCAmelCase : int = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
lowerCAmelCase : List[Any] = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
lowerCAmelCase : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : Optional[int] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : List[str] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : List[Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
lowerCAmelCase : Optional[Any] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowerCAmelCase : int = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase_ )
class __lowercase :
"""simple docstring"""
def __call__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Tuple , ):
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE_: List[str] = titles if texts is None else texts
return super().__call__(
lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[int] = titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [titles]
SCREAMING_SNAKE_CASE_: int = texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [texts]
SCREAMING_SNAKE_CASE_: str = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.")
SCREAMING_SNAKE_CASE_: Optional[Any] = super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
SCREAMING_SNAKE_CASE_: int = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__)
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE_: Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
SCREAMING_SNAKE_CASE_: int = attention_mask
return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : BatchEncoding , lowerCAmelCase__ : DPRReaderOutput , lowerCAmelCase__ : int = 16 , lowerCAmelCase__ : int = 64 , lowerCAmelCase__ : int = 4 , ):
SCREAMING_SNAKE_CASE_: int = reader_input["input_ids"]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = reader_output[:3]
SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(range(lowerCAmelCase__) , reverse=lowerCAmelCase__ , key=relevance_logits.__getitem__)
SCREAMING_SNAKE_CASE_: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE_: Optional[int] = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE_: str = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE_: List[Any] = sequence_ids.index(self.pad_token_id)
else:
SCREAMING_SNAKE_CASE_: Dict = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase__ , top_spans=lowerCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase__ , start_index=lowerCAmelCase__ , end_index=lowerCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(lowerCAmelCase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , ):
SCREAMING_SNAKE_CASE_: Any = []
for start_index, start_score in enumerate(lowerCAmelCase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__: x[1] , reverse=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]")
SCREAMING_SNAKE_CASE_: int = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"Span is too long: {length} > {max_answer_length}")
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCAmelCase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
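# Editor's note: a minimal, hypothetical sketch (not part of the original file) of the
# span-scoring loop in _get_best_spans above: each (start, end) pair within
# max_answer_length is scored start_logit + end_logit, then candidates are sorted in
# descending score order.
def _demo_span_scores(
    start_logits=(0.1, 2.0, 0.3), end_logits=(0.2, 1.5, 3.0), max_answer_length=2
) -> list:
    scores = []
    for s, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[s : s + max_answer_length]):
            scores.append(((s, s + length), s_score + e_score))
    return sorted(scores, key=lambda x: x[1], reverse=True)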
| 671 | 0 |
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """simple docstring"""
    if digit_amount > 0:
        return round(number - int(number) , digit_amount )
    return number - int(number )
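# Editor's note: a minimal sketch (not part of the original file) of the two branches:
# digit_amount == 0 returns the full fractional part, digit_amount > 0 rounds it.
def _check_decimal_isolate() -> None:
    assert abs(decimal_isolate(1.53, 0) - 0.53) < 1e-9
    assert decimal_isolate(35.345, 1) == 0.3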
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 610 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = DistilBertTokenizer
_UpperCAmelCase : Union[str, Any] = DistilBertTokenizerFast
_UpperCAmelCase : int = True
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[Any] = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
SCREAMING_SNAKE_CASE_: Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 671 | 0 |
def check_cycle(graph: dict) -> bool:
    """simple docstring"""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph , node , visited , rec_stk )
        for node in graph )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """simple docstring"""
    visited.add(vertex )
    rec_stk.add(vertex )
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph , node , visited , rec_stk ):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex )
    return False
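# Editor's note: a minimal sketch (not part of the original file): a 3-node cycle is
# detected, a DAG is not.
def _demo_check_cycle() -> None:
    assert check_cycle({0: [1], 1: [2], 2: [0]}) is True
    assert check_cycle({0: [1, 2], 1: [2], 2: []}) is False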
if __name__ == "__main__":
from doctest import testmod
testmod()
| 519 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase : List[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def handle_test_results(test_results):
    expressions = test_results.split(" " )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
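# Editor's note: a minimal sketch (not part of the original script) of the parser above
# applied to a typical pytest summary line.
def _demo_handle_test_results() -> tuple:
    failed, success, time_spent = handle_test_results("= 2 failed, 10 passed in 31.12s =")
    assert (failed, success, time_spent) == (2, 10, "31.12s")
    return failed, success, time_spent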
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Any = None
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = True
SCREAMING_SNAKE_CASE_: Dict = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
SCREAMING_SNAKE_CASE_: Union[str, Any] = line
SCREAMING_SNAKE_CASE_: List[str] = False
return failures
class __lowercase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Dict = title
SCREAMING_SNAKE_CASE_: int = doc_test_results["time_spent"].split(",")[0]
SCREAMING_SNAKE_CASE_: int = doc_test_results["success"]
SCREAMING_SNAKE_CASE_: Optional[Any] = doc_test_results["failures"]
SCREAMING_SNAKE_CASE_: Any = self.n_success + self.n_failures
# Failures and success of the modeling tests
SCREAMING_SNAKE_CASE_: Optional[int] = doc_test_results
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: int = [self._time_spent]
SCREAMING_SNAKE_CASE_: List[Any] = 0
for time in time_spent:
SCREAMING_SNAKE_CASE_: Union[str, Any] = time.split(":")
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase__) == 1:
SCREAMING_SNAKE_CASE_: Dict = [0, 0, time_parts[0]]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3600 + minutes * 60 + seconds
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F"{int(lowerCAmelCase__)}h{int(lowerCAmelCase__)}m{int(lowerCAmelCase__)}s"
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
F" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[Any] = 40
SCREAMING_SNAKE_CASE_: List[str] = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__)}
SCREAMING_SNAKE_CASE_: Tuple = ""
for category, failures in category_failures.items():
if len(lowerCAmelCase__) == 0:
continue
if report != "":
report += "\n\n"
report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase__)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(lowerCAmelCase__)
@staticmethod
def _SCREAMING_SNAKE_CASE ( ):
SCREAMING_SNAKE_CASE_: List[str] = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(lowerCAmelCase__)}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(self.payload)}))
SCREAMING_SNAKE_CASE_: Optional[Any] = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
SCREAMING_SNAKE_CASE_: List[Any] = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = ""
for key, value in failures.items():
SCREAMING_SNAKE_CASE_: str = value[:200] + " [Truncated]" if len(lowerCAmelCase__) > 250 else value
failures_text += F"*{key}*\n_{value}_\n\n"
SCREAMING_SNAKE_CASE_: Any = job_name
SCREAMING_SNAKE_CASE_: List[Any] = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
SCREAMING_SNAKE_CASE_: Tuple = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _SCREAMING_SNAKE_CASE ( self : Any):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made.")
SCREAMING_SNAKE_CASE_: Tuple = self.doc_test_results.pop("job_link")
self.doc_test_results.pop("failures")
self.doc_test_results.pop("success")
self.doc_test_results.pop("time_spent")
SCREAMING_SNAKE_CASE_: Any = sorted(self.doc_test_results.items() , key=lambda lowerCAmelCase__: t[0])
for job, job_result in sorted_dict:
if len(job_result["failures"]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = F"*Num failures* :{len(job_result['failed'])} \n"
SCREAMING_SNAKE_CASE_: Optional[Any] = job_result["failures"]
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts["ts"] , )
time.sleep(1)
def A_ ( ):
SCREAMING_SNAKE_CASE_: Tuple = os.environ["GITHUB_RUN_ID"]
SCREAMING_SNAKE_CASE_: Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
SCREAMING_SNAKE_CASE_: List[Any] = requests.get(_UpperCAmelCase ).json()
SCREAMING_SNAKE_CASE_: Optional[Any] = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
SCREAMING_SNAKE_CASE_: Any = math.ceil((result["total_count"] - 1_00) / 1_00 )
for i in range(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , _UpperCAmelCase )
return {}
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = {}
if os.path.exists(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = os.listdir(_UpperCAmelCase )
for file in files:
try:
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE_: Dict = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(_UpperCAmelCase , _UpperCAmelCase )}." ) from e
return _artifact
def A_ ( ):
class __lowercase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: Dict = name
SCREAMING_SNAKE_CASE_: List[str] = []
def __str__( self : Optional[Any]):
return self.name
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str):
self.paths.append({"name": self.name, "path": path})
SCREAMING_SNAKE_CASE_: Dict[str, Artifact] = {}
SCREAMING_SNAKE_CASE_: List[Any] = filter(os.path.isdir , os.listdir() )
for directory in directories:
SCREAMING_SNAKE_CASE_: Dict = directory
if artifact_name not in _available_artifacts:
SCREAMING_SNAKE_CASE_: Tuple = Artifact(_UpperCAmelCase )
_available_artifacts[artifact_name].add_path(_UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase : Tuple = get_job_links()
lowerCAmelCase : Optional[Any] = retrieve_available_artifacts()
lowerCAmelCase : Any = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase : int = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""")
lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""])
lowerCAmelCase : List[str] = failed
lowerCAmelCase : Any = success
lowerCAmelCase : Dict = time_spent[1:-1] + """, """
lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""")
lowerCAmelCase : str = line.split()[0].replace("""\n""", """""")
if "::" in line:
lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""")
else:
lowerCAmelCase , lowerCAmelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A"""
lowerCAmelCase : Any = failure
break
lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 671 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before
            SoftMax or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stops generation once the full sequence reaches `max_length` tokens."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stops generation once `max_new_tokens` tokens were generated after `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stops generation once more than `max_time` seconds have elapsed since `initial_timestamp`."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 31 |
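# Usage sketch (my addition, not part of this module): stop generation after
# two seconds of wall time or 64 total tokens, whichever comes first. The
# model name and prompt are placeholders.
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("The quick brown fox", return_tensors="pt")
criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0), MaxLengthCriteria(max_length=64)])

# `generate` evaluates the list after every decoding step; any single
# criterion returning True ends the loop.
output_ids = model.generate(**inputs, stopping_criteria=criteria)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))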
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)

if __name__ == "__main__":
main()
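# For contrast, a hand-rolled version of what `accelerator.accumulate` manages
# (my addition; assumes `model`, `optimizer`, `lr_scheduler` and
# `train_dataloader` are built as above, plain single-process PyTorch).
# The context manager additionally skips distributed gradient synchronization
# on the non-stepping iterations, which this naive loop cannot do.
accumulation_steps = 4
optimizer.zero_grad()
for step, batch in enumerate(train_dataloader):
    loss = model(**batch).loss / accumulation_steps  # average over the window
    loss.backward()  # gradients accumulate across backward calls
    if (step + 1) % accumulation_steps == 0:
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()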
| 671 | 0 |
"""simple docstring"""
def snake_case__ ( _lowerCamelCase, _lowerCamelCase ) ->int:
"""simple docstring"""
__lowercase : Optional[int] = len(_UpperCAmelCase )
print("The following activities are selected:" )
# The first activity is always selected
__lowercase : Optional[Any] = 0
print(_UpperCAmelCase, end="," )
# Consider rest of the activities
for j in range(_UpperCAmelCase ):
# If this activity has start time greater than
# or equal to the finish time of previously
# selected activity, then select it
if start[j] >= finish[i]:
print(_UpperCAmelCase, end="," )
__lowercase : Optional[int] = j
if __name__ == "__main__":
import doctest
doctest.testmod()
__A : Tuple = [1, 3, 0, 5, 8, 5]
__A : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
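# With the sample data above the selected indices are 0, 1, 3 and 4.
# The greedy choice is only optimal because `finish` is non-decreasing;
# unsorted input must be sorted by finish time before calling the function.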
| 575 |
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance in metres between two points, using WGS84 constants."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2) * cos(phi_1) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
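# Quick sanity check (my addition): San Francisco to Los Angeles,
# roughly 559 km along the great circle.
if __name__ == "__main__":
    distance = haversine_distance(37.7749, -122.4194, 34.0522, -118.2437)
    print(f"SF -> LA: {distance / 1000:.0f} km")  # expect roughly 559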
| 671 | 0 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Runs the doctests of every file in `directory` matching the given identifier filters."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 168 |
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
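# Typical invocation (my addition; the script filename is assumed, the flags
# match the argparse definitions above):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bert_model.ckpt \
#     --bert_config_file ./bert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin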
| 671 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_graphormer"] = [
        "GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GraphormerForGraphClassification",
        "GraphormerModel",
        "GraphormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_graphormer import (
            GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            GraphormerForGraphClassification,
            GraphormerModel,
            GraphormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
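# Toy illustration (my addition) of the lazy-import idea behind `_LazyModule`:
# not the actual transformers implementation, just the core mechanism of
# deferring submodule imports until an exported name is first accessed.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._name_to_module = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the defining submodule only now, on first access.
        submodule = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value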
| 417 |
import math


def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k ± 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral for which the ratio of primes
    along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
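# Note: with the default ratio of 0.1 this reproduces Project Euler problem 58.
# Each pass of the while-loop adds the ring with side length j + 2, whose three
# non-square diagonal corners are j*j + j + 1, j*j + 2j + 2 and j*j + 3j + 3
# (spaced j + 1 apart); the fourth corner (j + 2)^2 is a perfect square and
# never prime, so it is skipped, while 2*j - 1 counts all diagonal values seen.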
| 671 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
__A = random.Random()
def lowercase_ ( _lowerCamelCase: Any , _lowerCamelCase: List[str]=1.0 , _lowerCamelCase: Union[str, Any]=None , _lowerCamelCase: List[Any]=None ) -> str:
'''simple docstring'''
if rng is None:
__lowerCamelCase : Dict = global_rng
__lowerCamelCase : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class _snake_case ( unittest.TestCase ):
def __init__( self : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : str=7 , UpperCAmelCase : Dict=400 , UpperCAmelCase : Tuple=2000 , UpperCAmelCase : Tuple=1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : List[str]=16000 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Optional[int]=80 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : Optional[Any]=64 , UpperCAmelCase : int="hann_window" , UpperCAmelCase : int=80 , UpperCAmelCase : Tuple=7600 , UpperCAmelCase : str=1E-10 , UpperCAmelCase : List[Any]=True , ):
__lowerCamelCase : int = parent
__lowerCamelCase : Optional[Any] = batch_size
__lowerCamelCase : int = min_seq_length
__lowerCamelCase : List[Any] = max_seq_length
__lowerCamelCase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCamelCase : int = feature_size
__lowerCamelCase : List[Any] = padding_value
__lowerCamelCase : Optional[Any] = sampling_rate
__lowerCamelCase : Optional[Any] = do_normalize
__lowerCamelCase : Union[str, Any] = num_mel_bins
__lowerCamelCase : Optional[int] = hop_length
__lowerCamelCase : Optional[int] = win_length
__lowerCamelCase : Optional[Any] = win_function
__lowerCamelCase : Optional[int] = fmin
__lowerCamelCase : int = fmax
__lowerCamelCase : Tuple = mel_floor
__lowerCamelCase : Tuple = return_attention_mask
def lowerCamelCase__ ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : int=False , UpperCAmelCase : Optional[int]=False ):
def _flatten(UpperCAmelCase : Optional[Any] ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
__lowerCamelCase : List[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__lowerCamelCase : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCamelCase : Union[str, Any] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : int=False ):
if equal_length:
__lowerCamelCase : Any = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__lowerCamelCase : Dict = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCamelCase : List[str] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class _snake_case ( UpperCAmelCase_ , unittest.TestCase ):
snake_case__ = SpeechTaFeatureExtractor
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Dict = SpeechTaFeatureExtractionTester(self )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Optional[Any] ):
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCamelCase__ ( self : Optional[int] ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : int = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
__lowerCamelCase : Any = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__lowerCamelCase : List[str] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
__lowerCamelCase : List[Any] = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
__lowerCamelCase : List[Any] = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : str = ["longest", "max_length", "do_not_pad"]
__lowerCamelCase : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCamelCase : Tuple = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
__lowerCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase : Any = range(800 , 1400 , 200 )
__lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in lengths]
__lowerCamelCase : List[str] = ["longest", "max_length", "do_not_pad"]
__lowerCamelCase : Union[str, Any] = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCamelCase : Union[str, Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
__lowerCamelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" )
__lowerCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : Tuple = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" )
__lowerCamelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
__lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : Optional[int] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" )
__lowerCamelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase : Any = np.random.rand(100 ).astype(np.floataa )
__lowerCamelCase : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCamelCase : Union[str, Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__lowerCamelCase : int = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCamelCase__ ( self : str ):
# Tests that all call wrap to encode_plus and batch_encode_plus
__lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCamelCase : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : Union[str, Any] = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
__lowerCamelCase : int = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
__lowerCamelCase : Tuple = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
__lowerCamelCase : List[str] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
__lowerCamelCase : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
__lowerCamelCase : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCamelCase : Optional[Any] = np.asarray(lowerCAmelCase__ )
__lowerCamelCase : Tuple = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
__lowerCamelCase : List[Any] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
__lowerCamelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCamelCase : int = feat_extract.model_input_names[0]
__lowerCamelCase : str = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
__lowerCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
__lowerCamelCase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
__lowerCamelCase : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCamelCase : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
__lowerCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCamelCase : Union[str, Any] = feat_extract.model_input_names[0]
__lowerCamelCase : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
__lowerCamelCase : Dict = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCamelCase : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCamelCase : str = feat_extract.model_input_names[0]
__lowerCamelCase : List[Any] = BatchFeature({input_name: speech_inputs} )
__lowerCamelCase : Tuple = feat_extract.num_mel_bins # hack!
__lowerCamelCase : str = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
__lowerCamelCase : Optional[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def lowerCamelCase__ ( self : Optional[int] ):
__lowerCamelCase : Dict = self.feat_extract_dict
__lowerCamelCase : int = True
__lowerCamelCase : Optional[int] = self.feature_extraction_class(**lowerCAmelCase__ )
__lowerCamelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCamelCase : str = [len(lowerCAmelCase__ ) for x in speech_inputs]
__lowerCamelCase : List[Any] = feat_extract.model_input_names[0]
__lowerCamelCase : int = BatchFeature({input_name: speech_inputs} )
__lowerCamelCase : Optional[Any] = feat_extract.num_mel_bins # hack!
__lowerCamelCase : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def lowerCamelCase__ ( self : int ):
__lowerCamelCase : int = self.feat_extract_dict
__lowerCamelCase : Tuple = True
__lowerCamelCase : List[str] = self.feature_extraction_class(**lowerCAmelCase__ )
__lowerCamelCase : Union[str, Any] = self.feat_extract_tester.prepare_inputs_for_target()
__lowerCamelCase : Tuple = [len(lowerCAmelCase__ ) for x in speech_inputs]
__lowerCamelCase : Dict = feat_extract.model_input_names[0]
__lowerCamelCase : List[str] = BatchFeature({input_name: speech_inputs} )
__lowerCamelCase : Optional[Any] = min(lowerCAmelCase__ )
__lowerCamelCase : List[str] = feat_extract.num_mel_bins # hack!
__lowerCamelCase : Union[str, Any] = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Optional[int] ):
from datasets import load_dataset
__lowerCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__lowerCamelCase : Tuple = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCamelCase__ ( self : int ):
# fmt: off
__lowerCamelCase : int = torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
__lowerCamelCase : Dict = self._load_datasamples(1 )
__lowerCamelCase : Tuple = SpeechTaFeatureExtractor()
__lowerCamelCase : Optional[Any] = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def lowerCamelCase__ ( self : Dict ):
# fmt: off
__lowerCamelCase : List[str] = torch.tensor(
[-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
-3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
-3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
-3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
# fmt: on
__lowerCamelCase : List[str] = self._load_datasamples(1 )
__lowerCamelCase : str = SpeechTaFeatureExtractor()
__lowerCamelCase : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) ) | 646 |
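# Usage sketch (my addition) of the two call paths exercised by the tests
# above: plain audio produces normalized waveform `input_values` for the
# encoder, while `audio_target=` produces log-mel spectrogram targets.
# Random noise stands in for real 16 kHz speech.
import numpy as np
from transformers import SpeechT5FeatureExtractor

extractor = SpeechT5FeatureExtractor()
waveform = np.random.randn(16000).astype(np.float32)  # one second of "audio"

inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
print(inputs.input_values.shape)  # (1, 16000) -- zero-mean, unit-variance waveform

targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
print(targets.input_values.shape)  # (1, num_frames, 80) -- log-mel features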
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
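# Demonstration (my addition) of the converters above:
if __name__ == "__main__":
    print(to_pascal_case("one two three"))        # OneTwoThree
    print(to_camel_case("one two three"))         # oneTwoThree
    print(to_snake_case("one two three", False))  # one_two_three
    print(to_kebab_case("one two three", True))   # ONE-TWO-THREE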
| 671 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union


@dataclass
class DownloadConfig:
    """Configuration for download behaviour (cache location, retries, extraction, auth)."""

    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None

    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
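# Example (my addition, using the field names as reconstructed above): an
# offline, cache-only configuration and an independent deep copy of it.
offline_config = DownloadConfig(local_files_only=True, max_retries=3)
offline_clone = offline_config.copy()  # deepcopies every field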
| 668 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
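# Usage sketch (my addition): pair the config with a Swin backbone, as the
# transformers documentation does, and build a randomly initialized model.
from transformers import SwinConfig, UperNetConfig, UperNetForSemanticSegmentation

backbone_config = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = UperNetConfig(backbone_config=backbone_config, num_labels=150)
model = UperNetForSemanticSegmentation(config)  # weights are not pretrained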
| 671 | 0 |
import os


def solution():
    """Finds the maximum top-to-bottom path sum in the triangle read from triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_filepath = os.path.join(script_dir, "triangle.txt")

    with open(triangle_filepath) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
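# Worked example of the sweep: on the toy triangle [[3], [7, 4], [2, 4, 6]]
# each cell absorbs the larger of its two parents, leaving the bottom row as
# [12, 14, 13]; the answer is max of that row, 14 (path 3 -> 7 -> 4). The pass
# is O(n^2) in the number of rows, versus 2^(n-1) root-to-base paths by brute force.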
| 121 |
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 671 | 0 |
from math import sqrt


def is_prime(number: int) -> bool:
    """input: positive integer; returns True if it is prime, otherwise False."""
    # precondition
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n: int) -> list:
    """Sieve of Erathostenes: returns the primes from 2 up to n."""
    # precondition
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returns.

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Returns a list of all primes from 2 up to n, using is_prime directly."""
    # precondition
    assert isinstance(n, int) and (n > 2), "'N' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Returns the prime factorization of 'number' as an ascending list."""
    # precondition
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    # precondition
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    # precondition
    assert isinstance(number, int) and (number >= 0), "'number' must be an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Returns two primes whose sum equals the even input (Goldbach's conjecture)."""
    # precondition
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes. And sum of elements must be eq 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor of two non-negative integers (Euclid's algorithm)."""
    # precondition
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must be from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive integers."""
    # precondition
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must be from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Returns the n-th prime number, where get_prime(0) == 2."""
    # precondition
    assert isinstance(n, int) and (n >= 0), "'number' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Returns all primes strictly between the two prime arguments."""
    # precondition
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Returns all divisors of n, including 1 and n itself."""
    # precondition
    assert isinstance(n, int) and (n >= 1), "'n' must be int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """True if the sum of the proper divisors of 'number' equals 'number'."""
    # precondition
    assert isinstance(number, int) and (number > 1), "'number' must be an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Returns the fraction numerator/denominator reduced to lowest terms."""
    # precondition
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Returns n! for n >= 0."""
    # precondition
    assert isinstance(n, int) and (n >= 0), "'n' must be a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Returns the n-th Fibonacci term, with fib(0) == fib(1) == 1."""
    # precondition
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
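# A few spot checks for the helpers above (my addition):
if __name__ == "__main__":
    assert get_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(60) == [2, 2, 3, 5]
    assert gcd(12, 18) == 6 and kg_v(12, 18) == 36
    assert goldbach(28) == [5, 23]
    assert simplify_fraction(10, 20) == (1, 2)
    assert get_prime(4) == 11  # 2, 3, 5, 7, 11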
| 89 |
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """
    Returns the least row length n for which the number of ways to fill the
    row with blocks of at least `min_block_length` exceeds one million.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
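# The recurrence: fill_count_functions[n] counts the ways to fill a row of
# length n, the all-black row included. For a first block of length L placed
# at offset `block_start`, one black separator must follow, leaving a free
# suffix of length n - block_start - L - 1; the lone `+= 1` covers the block
# that ends flush with the row and needs no separator. With the default
# minimum block length of 50 this is Project Euler problem 115.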
| 671 | 0 |
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 612 |
def remove_digit(num: int) -> int:
    """Returns the biggest number obtainable by removing exactly one digit from abs(num)."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
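# For example, remove_digit(152) returns 52 and remove_digit(-2736) returns 736
# (the sign is dropped by abs() before the digitwise removals are compared).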
| 671 | 0 |
"""simple docstring"""
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    """simple docstring"""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks))


def get_device_map(n_layers: int, devices: list) -> dict:
    """simple docstring"""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
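
# Usage sketch (device ids are illustrative):
#   get_device_map(8, [0, 1]) -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
#   assert_device_map({0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}, 8)  # passes silently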
| 610 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
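
# Quick usage sketch (the behaviour is exercised exhaustively in the test below):
#   cll = CircularLinkedList()
#   cll.insert_tail(1); cll.insert_tail(2); cll.insert_head(0)
#   repr(cll) -> "0->1->2", with the tail's `next` wrapping back to the head.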
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__UpperCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _UpperCamelCase ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCamelCase : WhisperForConditionalGeneration , _lowerCamelCase : WhisperProcessor , _lowerCamelCase : AutoencoderKL , _lowerCamelCase : CLIPTextModel , _lowerCamelCase : CLIPTokenizer , _lowerCamelCase : UNetaDConditionModel , _lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowerCamelCase : StableDiffusionSafetyChecker , _lowerCamelCase : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
speech_model=lowerCAmelCase__ , speech_processor=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , )
def _snake_case ( self : Optional[Any] , _lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
__lowerCamelCase : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def _snake_case ( self : int ):
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase__ )
@torch.no_grad()
def __call__( self : int , _lowerCamelCase : List[Any] , _lowerCamelCase : Union[str, Any]=1_6_0_0_0 , _lowerCamelCase : int = 5_1_2 , _lowerCamelCase : int = 5_1_2 , _lowerCamelCase : int = 5_0 , _lowerCamelCase : float = 7.5 , _lowerCamelCase : Optional[Union[str, List[str]]] = None , _lowerCamelCase : Optional[int] = 1 , _lowerCamelCase : float = 0.0 , _lowerCamelCase : Optional[torch.Generator] = None , _lowerCamelCase : Optional[torch.FloatTensor] = None , _lowerCamelCase : Optional[str] = "pil" , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowerCamelCase : int = 1 , **_lowerCamelCase : Dict , ):
'''simple docstring'''
__lowerCamelCase : List[str] = self.speech_processor.feature_extractor(
lowerCAmelCase__ , return_tensors="""pt""" , sampling_rate=lowerCAmelCase__ ).input_features.to(self.device )
__lowerCamelCase : Optional[int] = self.speech_model.generate(lowerCAmelCase__ , max_length=4_8_0_0_0_0 )
__lowerCamelCase : List[Any] = self.speech_processor.tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , normalize=lowerCAmelCase__ )[
0
]
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCamelCase : str = 1
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCamelCase : int = len(lowerCAmelCase__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowerCAmelCase__ )}.""" )
# get prompt text embeddings
__lowerCamelCase : Dict = self.tokenizer(
lowerCAmelCase__ , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase : List[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase : Union[str, Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__lowerCamelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase : List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase : Optional[Any] = text_embeddings.shape
__lowerCamelCase : List[Any] = text_embeddings.repeat(1 , lowerCAmelCase__ , 1 )
__lowerCamelCase : Tuple = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__lowerCamelCase : Any = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__lowerCamelCase : List[str]
if negative_prompt is None:
__lowerCamelCase : Tuple = [""] * batch_size
elif type(lowerCAmelCase__ ) is not type(lowerCAmelCase__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__ )} !="""
F""" {type(lowerCAmelCase__ )}.""" )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__lowerCamelCase : Any = [negative_prompt]
elif batch_size != len(lowerCAmelCase__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__lowerCamelCase : Tuple = negative_prompt
__lowerCamelCase : Union[str, Any] = text_input_ids.shape[-1]
__lowerCamelCase : Dict = self.tokenizer(
lowerCAmelCase__ , padding="""max_length""" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="""pt""" , )
__lowerCamelCase : List[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase : int = uncond_embeddings.shape[1]
__lowerCamelCase : Optional[Any] = uncond_embeddings.repeat(1 , lowerCAmelCase__ , 1 )
__lowerCamelCase : Dict = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__lowerCamelCase : List[str] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__lowerCamelCase : Any = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__lowerCamelCase : Tuple = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device="""cpu""" , dtype=lowerCAmelCase__ ).to(
self.device )
else:
__lowerCamelCase : Optional[Any] = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__lowerCamelCase : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__lowerCamelCase : int = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__lowerCamelCase : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__lowerCamelCase : List[str] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__lowerCamelCase : str = {}
if accepts_eta:
__lowerCamelCase : Tuple = eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase : Optional[int] = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
__lowerCamelCase : Optional[Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
__lowerCamelCase : List[str] = noise_pred.chunk(2 )
__lowerCamelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase : List[str] = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__lowerCamelCase : List[Any] = 1 / 0.18_215 * latents
__lowerCamelCase : Union[str, Any] = self.vae.decode(lowerCAmelCase__ ).sample
__lowerCamelCase : Tuple = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowerCamelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase : Optional[Any] = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ )
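

if __name__ == "__main__":
    # Toy illustration of the classifier-free guidance update used in __call__
    # above; the tensors are stand-ins, not real UNet outputs:
    _uncond = torch.zeros(1, 4, 8, 8)
    _text = torch.ones(1, 4, 8, 8)
    _guided = _uncond + 7.5 * (_text - _uncond)
    assert torch.allclose(_guided, torch.full((1, 4, 8, 8), 7.5))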
| 519 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
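
# Worked example: the smallest hollow square lamina is a 3x3 outer square with
# a 1x1 hole, using 3 * 3 - 1 * 1 = 8 tiles, so count[8] gets its first hit at
# outer_width == 3.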
if __name__ == "__main__":
print(f'''{solution() = }''')
| 671 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _lowerCAmelCase : TransformeraDModel , _lowerCAmelCase : AutoencoderKL , _lowerCAmelCase : KarrasDiffusionSchedulers , _lowerCAmelCase : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
# create a imagenet -> id dictionary for easier use
SCREAMING_SNAKE_CASE_ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(',' ):
SCREAMING_SNAKE_CASE_ = int(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = dict(sorted(self.labels.items() ) )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : Union[str, List[str]] ):
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = list(lowerCAmelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
F"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : Tuple , _lowerCAmelCase : List[int] , _lowerCAmelCase : float = 4.0 , _lowerCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase : int = 50 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , ):
SCREAMING_SNAKE_CASE_ = len(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = self.transformer.config.sample_size
SCREAMING_SNAKE_CASE_ = self.transformer.config.in_channels
SCREAMING_SNAKE_CASE_ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase__ , device=self.device , dtype=self.transformer.dtype , )
SCREAMING_SNAKE_CASE_ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
SCREAMING_SNAKE_CASE_ = torch.tensor(lowerCAmelCase__ , device=self.device ).reshape(-1 )
SCREAMING_SNAKE_CASE_ = torch.tensor([1_000] * batch_size , device=self.device )
SCREAMING_SNAKE_CASE_ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ = latent_model_input[: len(lowerCAmelCase__ ) // 2]
SCREAMING_SNAKE_CASE_ = torch.cat([half, half] , dim=0 )
SCREAMING_SNAKE_CASE_ = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ = t
if not torch.is_tensor(lowerCAmelCase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
SCREAMING_SNAKE_CASE_ = latent_model_input.device.type == "mps"
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = torch.floataa if is_mps else torch.floataa
else:
SCREAMING_SNAKE_CASE_ = torch.intaa if is_mps else torch.intaa
SCREAMING_SNAKE_CASE_ = torch.tensor([timesteps] , dtype=lowerCAmelCase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE_ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
SCREAMING_SNAKE_CASE_ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
SCREAMING_SNAKE_CASE_ = self.transformer(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).sample
# perform guidance
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
SCREAMING_SNAKE_CASE_ = torch.split(lowerCAmelCase__ , len(lowerCAmelCase__ ) // 2 , dim=0 )
SCREAMING_SNAKE_CASE_ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
SCREAMING_SNAKE_CASE_ = torch.cat([half_eps, half_eps] , dim=0 )
SCREAMING_SNAKE_CASE_ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
SCREAMING_SNAKE_CASE_ = torch.split(lowerCAmelCase__ , lowerCAmelCase__ , dim=1 )
else:
SCREAMING_SNAKE_CASE_ = noise_pred
# compute previous image: x_t -> x_t-1
SCREAMING_SNAKE_CASE_ = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
if guidance_scale > 1:
SCREAMING_SNAKE_CASE_ = latent_model_input.chunk(2 , dim=0 )
else:
SCREAMING_SNAKE_CASE_ = latent_model_input
SCREAMING_SNAKE_CASE_ = 1 / self.vae.config.scaling_factor * latents
SCREAMING_SNAKE_CASE_ = self.vae.decode(lowerCAmelCase__ ).sample
SCREAMING_SNAKE_CASE_ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE_ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE_ = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (samples,)
        return ImagePipelineOutput(images=lowerCAmelCase__ )
| 31 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
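
# Accessing e.g. `XLMModel` on this module triggers the real import inside
# _LazyModule.__getattr__. A toy sketch of the idea (not the transformers code):
#
#   class LazyModule(types.ModuleType):
#       def __getattr__(self, name):
#           submodule = self._name_to_module[name]          # e.g. ".modeling_xlm"
#           value = getattr(importlib.import_module(submodule, __package__), name)
#           setattr(self, name, value)                      # cache for next access
#           return value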
| 671 | 0 |
"""simple docstring"""
from __future__ import annotations
RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"
def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """simple docstring"""
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict
def _plugboard(pbstring: str) -> dict[str, str]:
    """simple docstring"""
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb
def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """simple docstring"""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer-style carry)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #     pass
        #     Error could be also raised
        #     raise ValueError('Invalid symbol(' + repr(symbol) + ')')

        result.append(symbol)

    return "".join(result)
if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)  # any three distinct rotors work; this trio is an assumption
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
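    # Round-trip check: the machine is its own inverse, so re-running the
    # ciphertext through the same settings recovers the uppercased plaintext.
    assert enigma(en, rotor_pos, rotor_sel, pb) == message.upper()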
| 575 |
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node]))
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
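
# Both helpers are plain breadth-first searches: O(V + E) time, O(V) space.
# Edge cases handled above: unknown start/target -> -1 and start == target -> 0.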
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
| 671 | 0 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Tuple = 384
__UpperCAmelCase : Optional[int] = 7
if "tiny" in model_name:
__UpperCAmelCase : List[Any] = 96
__UpperCAmelCase : Tuple = (2, 2, 6, 2)
__UpperCAmelCase : Optional[int] = (3, 6, 12, 24)
elif "small" in model_name:
__UpperCAmelCase : List[str] = 96
__UpperCAmelCase : Tuple = (2, 2, 18, 2)
__UpperCAmelCase : Any = (3, 6, 12, 24)
elif "base" in model_name:
__UpperCAmelCase : str = 128
__UpperCAmelCase : Optional[int] = (2, 2, 18, 2)
__UpperCAmelCase : Tuple = (4, 8, 16, 32)
__UpperCAmelCase : List[Any] = 12
__UpperCAmelCase : Tuple = 512
elif "large" in model_name:
__UpperCAmelCase : Union[str, Any] = 192
__UpperCAmelCase : int = (2, 2, 18, 2)
__UpperCAmelCase : Union[str, Any] = (6, 12, 24, 48)
__UpperCAmelCase : Dict = 12
__UpperCAmelCase : Any = 768
# set label information
__UpperCAmelCase : int = 150
__UpperCAmelCase : int = "huggingface/label-files"
__UpperCAmelCase : Any = "ade20k-id2label.json"
__UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) )
__UpperCAmelCase : Optional[Any] = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
__UpperCAmelCase : Tuple = SwinConfig(
embed_dim=_UpperCAmelCase , depths=_UpperCAmelCase , num_heads=_UpperCAmelCase , window_size=_UpperCAmelCase , out_features=["stage1", "stage2", "stage3", "stage4"] , )
__UpperCAmelCase : List[str] = UperNetConfig(
backbone_config=_UpperCAmelCase , auxiliary_in_channels=_UpperCAmelCase , num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase , )
return config
def _lowercase ( lowerCamelCase__ ) -> List[str]:
"""simple docstring"""
__UpperCAmelCase : List[Any] = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__UpperCAmelCase : str = dct.pop(_UpperCAmelCase )
__UpperCAmelCase : Optional[int] = val
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Any = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
__UpperCAmelCase : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
__UpperCAmelCase : Tuple = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
__UpperCAmelCase : str = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__UpperCAmelCase : str = in_proj_weight[:dim, :]
__UpperCAmelCase : Dict = in_proj_bias[: dim]
__UpperCAmelCase : List[Any] = in_proj_weight[
dim : dim * 2, :
]
__UpperCAmelCase : Union[str, Any] = in_proj_bias[
dim : dim * 2
]
__UpperCAmelCase : Optional[Any] = in_proj_weight[
-dim :, :
]
__UpperCAmelCase : str = in_proj_bias[-dim :]
# fmt: on
def _lowercase ( lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = x.shape
__UpperCAmelCase : Optional[int] = x.reshape(_UpperCAmelCase , 4 , in_channel // 4 )
__UpperCAmelCase : str = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(_UpperCAmelCase , _UpperCAmelCase )
return x
def _lowercase ( lowerCamelCase__ ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = x.shape
__UpperCAmelCase : Dict = x.reshape(_UpperCAmelCase , in_channel // 4 , 4 )
__UpperCAmelCase : List[str] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(_UpperCAmelCase , _UpperCAmelCase )
return x
def _lowercase ( lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = x.shape[0]
__UpperCAmelCase : List[Any] = x.reshape(4 , in_channel // 4 )
__UpperCAmelCase : List[str] = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(_UpperCAmelCase )
return x
def _lowercase ( lowerCamelCase__ ) -> str:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = x.shape[0]
__UpperCAmelCase : Optional[int] = x.reshape(in_channel // 4 , 4 )
__UpperCAmelCase : Dict = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(_UpperCAmelCase )
return x
def _lowercase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = {
"upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
"upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
"upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
"upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
}
__UpperCAmelCase : str = model_name_to_url[model_name]
__UpperCAmelCase : int = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="cpu" , file_name=_UpperCAmelCase )[
"state_dict"
]
for name, param in state_dict.items():
print(_UpperCAmelCase , param.shape )
__UpperCAmelCase : Union[str, Any] = get_upernet_config(_UpperCAmelCase )
__UpperCAmelCase : Tuple = UperNetForSemanticSegmentation(_UpperCAmelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
__UpperCAmelCase : List[str] = state_dict.pop(_UpperCAmelCase )
if "bn" in key:
__UpperCAmelCase : Optional[Any] = key.replace("bn" , "batch_norm" )
__UpperCAmelCase : Optional[Any] = val
# rename keys
__UpperCAmelCase : Tuple = create_rename_keys(_UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
__UpperCAmelCase : Tuple = reverse_correct_unfold_reduction_order(_UpperCAmelCase )
if "norm" in key:
__UpperCAmelCase : Dict = reverse_correct_unfold_norm_order(_UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase )
# verify on image
__UpperCAmelCase : Optional[int] = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
__UpperCAmelCase : List[Any] = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert("RGB" )
__UpperCAmelCase : List[Any] = SegformerImageProcessor()
__UpperCAmelCase : Tuple = processor(_UpperCAmelCase , return_tensors="pt" ).pixel_values
with torch.no_grad():
__UpperCAmelCase : Any = model(_UpperCAmelCase )
__UpperCAmelCase : Any = outputs.logits
print(logits.shape )
print("First values of logits:" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
__UpperCAmelCase : Any = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
elif model_name == "upernet-swin-small":
__UpperCAmelCase : List[Any] = torch.tensor(
[[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
elif model_name == "upernet-swin-base":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
elif model_name == "upernet-swin-large":
__UpperCAmelCase : Union[str, Any] = torch.tensor(
[[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
print("Logits:" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _UpperCAmelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCAmelCase )
print(f"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(_UpperCAmelCase )
if push_to_hub:
print(f"""Pushing model and processor for {model_name} to hub""" )
model.push_to_hub(f"""openmmlab/{model_name}""" )
processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
_a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[f"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_a : List[str] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 168 |
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
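

if __name__ == "__main__":
    # Smoke-test sketch with a pass-through "filter", so the plots can be tried
    # without the project's IIR filter classes (assumption: any object with a
    # matching process() method satisfies FilterType):
    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), 48000)  # flat 0 dB response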
| 671 | 0 |
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the main tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the main tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
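
# Hedged usage sketch (the checkpoint name is an assumption, not taken from
# this file):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
#   # -> pixel_values, input_ids/attention_mask, plus qformer_input_ids/qformer_attention_mask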
| 417 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
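
# Hedged expectation: T(36) * T(77) = 666 * 3003 = 1_999_998 is the rectangle
# count closest to 2_000_000, so solution() should return 36 * 77 = 2772.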
if __name__ == "__main__":
print(f'''{solution() = }''')
| 671 | 0 |
"""simple docstring"""
def count_inversions_bf(arr):
    """simple docstring"""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions
def count_inversions_recursive(arr):
    """simple docstring"""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions
def _count_cross_inversions(p, q):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P).
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion
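
# Example for the merge step: p = [2, 10], q = [1, 5, 11] merges to
# r = [1, 2, 5, 10, 11] with 3 cross inversions: (2, 1), (10, 1), (10, 5).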
def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
    main()
| 646 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 671 | 0 |
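# The _import_structure/_LazyModule machinery above defers heavy backend imports
# until an attribute is actually touched. Below is a toy, standard-library-only
# sketch of that pattern; `LazyModule` here is a simplified stand-in, not the
# transformers implementation.
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> module that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }
    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value
# stdlib modules stand in for the heavy torch/tf backends
lazy = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
print(lazy.sqrt(2))  # "math" is only imported at this point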
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeps the
# full vocab and merges files, and thus also results in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
_UpperCAmelCase : int = """facebook/wmt19-en-de"""
_UpperCAmelCase : Tuple = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
_UpperCAmelCase : Union[str, Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
_UpperCAmelCase : Any = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
_UpperCAmelCase : List[str] = tokenizer(["Making tiny model"], return_tensors="pt")
_UpperCAmelCase : Any = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
_UpperCAmelCase : Optional[int] = """tiny-wmt19-en-de"""
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 668 |
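# The script above shrinks a full pretrained config down to a tiny test model by
# reducing every capacity dimension to the minimum while keeping the vocabulary
# fields intact. A dependency-free sketch of that reduction step, with a plain
# dict standing in for the config object (all field values below are illustrative):
def make_tiny_config(full_config):
    tiny = dict(full_config)  # vocab-related fields are kept untouched
    tiny.update(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
    return tiny
full = {
    "vocab_size": 42024,  # hypothetical; the real value comes from the tokenizer
    "d_model": 1024,
    "encoder_layers": 6,
    "decoder_layers": 6,
    "encoder_ffn_dim": 4096,
    "decoder_ffn_dim": 4096,
    "encoder_attention_heads": 16,
    "decoder_attention_heads": 16,
}
print(make_tiny_config(full))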
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase : Optional[int] = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
lowerCAmelCase : str = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
lowerCAmelCase : List[str] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase : int = f'''down_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : List[str] = f'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase : Any = f'''down_blocks.{i}.attentions.{j}.'''
lowerCAmelCase : List[Any] = f'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase : Any = f'''up_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : str = f'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase : List[Any] = f'''up_blocks.{i}.attentions.{j}.'''
lowerCAmelCase : str = f'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase : Any = f'''down_blocks.{i}.downsamplers.0.conv.'''
lowerCAmelCase : Tuple = f'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase : Tuple = f'''up_blocks.{i}.upsamplers.0.'''
lowerCAmelCase : Tuple = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase : Any = """mid_block.attentions.0."""
lowerCAmelCase : Dict = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase : int = f'''mid_block.resnets.{j}.'''
lowerCAmelCase : Union[str, Any] = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def A_ ( _UpperCAmelCase ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
SCREAMING_SNAKE_CASE_: Dict = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
SCREAMING_SNAKE_CASE_: Optional[int] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: str = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
SCREAMING_SNAKE_CASE_: Optional[Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] = v
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase : Union[str, Any] = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase : Union[str, Any] = f'''encoder.down_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : Optional[Any] = f'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase : Dict = f'''down_blocks.{i}.downsamplers.0.'''
lowerCAmelCase : List[str] = f'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase : List[str] = f'''up_blocks.{i}.upsamplers.0.'''
lowerCAmelCase : int = f'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase : Any = f'''decoder.up_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : int = f'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase : str = f'''mid_block.resnets.{i}.'''
lowerCAmelCase : Tuple = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase : List[Any] = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def A_ ( _UpperCAmelCase ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
SCREAMING_SNAKE_CASE_: Union[str, Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = v
SCREAMING_SNAKE_CASE_: Tuple = {v: vae_state_dict[k] for k, v in mapping.items()}
SCREAMING_SNAKE_CASE_: Union[str, Any] = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k:
print(f"Reshaping {k} for SD format" )
SCREAMING_SNAKE_CASE_: List[str] = reshape_weight_for_sd(_UpperCAmelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
lowerCAmelCase : Optional[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase : Optional[int] = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase : str = {"""q""": 0, """k""": 1, """v""": 2}
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: List[str] = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
SCREAMING_SNAKE_CASE_: str = k[: -len(".q_proj.weight" )]
SCREAMING_SNAKE_CASE_: Dict = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
SCREAMING_SNAKE_CASE_: Tuple = [None, None, None]
SCREAMING_SNAKE_CASE_: Union[str, Any] = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
SCREAMING_SNAKE_CASE_: Union[str, Any] = k[: -len(".q_proj.bias" )]
SCREAMING_SNAKE_CASE_: Any = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
SCREAMING_SNAKE_CASE_: List[Any] = [None, None, None]
SCREAMING_SNAKE_CASE_: List[str] = v
continue
        SCREAMING_SNAKE_CASE_: int = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        SCREAMING_SNAKE_CASE_: str = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: int = torch.cat(_UpperCAmelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        SCREAMING_SNAKE_CASE_: Optional[int] = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0 ) )] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = torch.cat(_UpperCAmelCase )
return new_state_dict
def A_ ( _UpperCAmelCase ):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
lowerCAmelCase : Optional[Any] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase : int = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase : List[str] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase : Optional[int] = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
    # Load each model from safetensors if the file exists; otherwise fall back to the PyTorch .bin file
if osp.exists(unet_path):
lowerCAmelCase : Optional[int] = load_file(unet_path, device="""cpu""")
else:
lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
lowerCAmelCase : Optional[Any] = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
lowerCAmelCase : str = load_file(vae_path, device="""cpu""")
else:
lowerCAmelCase : List[Any] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
lowerCAmelCase : Optional[Any] = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
lowerCAmelCase : List[Any] = load_file(text_enc_path, device="""cpu""")
else:
lowerCAmelCase : List[Any] = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
lowerCAmelCase : Optional[Any] = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
lowerCAmelCase : int = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase : Optional[int] = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase : Union[str, Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase : Optional[int] = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase : Any = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase : Any = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase : str = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase : Dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase : Any = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase : Optional[Any] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase : Union[str, Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase : str = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase : int = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 671 | 0 |
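# At its core the conversion above is a table-driven renaming of state-dict keys.
# A minimal, runnable sketch of that pattern (plain floats stand in for tensors;
# the real script additionally restricts some substitutions to resnet/attention
# keys and reshapes a few attention weights):
def rename_state_dict(state_dict, renames):
    # renames: ordered (old_substring, new_substring) pairs, applied to every key
    new_state_dict = {}
    for key, tensor in state_dict.items():
        for old, new in renames:
            key = key.replace(old, new)
        new_state_dict[key] = tensor
    return new_state_dict
hf_weights = {"time_embedding.linear_1.weight": 1.0, "conv_in.bias": 2.0}
renames = [
    ("time_embedding.linear_1.", "time_embed.0."),  # HF Diffusers name -> SD name
    ("conv_in.", "input_blocks.0.0."),
]
print(rename_state_dict(hf_weights, renames))
# {'time_embed.0.weight': 1.0, 'input_blocks.0.0.bias': 2.0}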
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def _lowerCAmelCase ( __magic_name__ :str ):
return {key.lstrip('''-''' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def _lowerCAmelCase ( ):
UpperCAmelCase_ = ArgumentParser(
'''HuggingFace Datasets CLI tool''' , usage='''datasets-cli <command> [<args>]''' , allow_abbrev=_UpperCAmelCase )
UpperCAmelCase_ = parser.add_subparsers(help='''datasets-cli command helpers''' )
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(_UpperCAmelCase )
EnvironmentCommand.register_subcommand(_UpperCAmelCase )
TestCommand.register_subcommand(_UpperCAmelCase )
RunBeamCommand.register_subcommand(_UpperCAmelCase )
DummyDataCommand.register_subcommand(_UpperCAmelCase )
# Parse args
UpperCAmelCase_ = parser.parse_known_args()
if not hasattr(_UpperCAmelCase , '''func''' ):
parser.print_help()
exit(1 )
UpperCAmelCase_ = parse_unknown_args(_UpperCAmelCase )
# Run
UpperCAmelCase_ = args.func(_UpperCAmelCase , **_UpperCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 121 |
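# The CLI above follows the standard argparse subcommand pattern: each command
# class registers its own subparser and binds a handler through set_defaults.
# A self-contained sketch with a hypothetical EchoCommand (the real CLI binds a
# factory that returns a service object and then calls service.run()):
import argparse
class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo", help="print a message")
        parser.add_argument("message")
        parser.set_defaults(func=lambda args: print(args.message))
def demo_main(argv=None):
    parser = argparse.ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo-cli command helpers")
    EchoCommand.register_subcommand(subparsers)
    args, _unknown = parser.parse_known_args(argv)
    if not hasattr(args, "func"):
        parser.print_help()
        return 1
    args.func(args)
    return 0
demo_main(["echo", "hello"])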
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = '''xlm-prophetnet'''
_UpperCAmelCase : Any = ['''past_key_values''']
_UpperCAmelCase : Tuple = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
def __init__( self : str , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[Union[str, Callable]] = "gelu" , lowerCAmelCase__ : Optional[int] = 3_0522 , lowerCAmelCase__ : Optional[int] = 1024 , lowerCAmelCase__ : Optional[int] = 4096 , lowerCAmelCase__ : Optional[int] = 12 , lowerCAmelCase__ : Optional[int] = 16 , lowerCAmelCase__ : Optional[int] = 4096 , lowerCAmelCase__ : Optional[int] = 12 , lowerCAmelCase__ : Optional[int] = 16 , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[float] = 0.1 , lowerCAmelCase__ : Optional[int] = 512 , lowerCAmelCase__ : Optional[float] = 0.02 , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[int] = 0 , lowerCAmelCase__ : Optional[int] = 2 , lowerCAmelCase__ : Optional[int] = 32 , lowerCAmelCase__ : Optional[int] = 128 , lowerCAmelCase__ : Optional[bool] = False , lowerCAmelCase__ : Optional[float] = 0.0 , lowerCAmelCase__ : Optional[bool] = True , lowerCAmelCase__ : Optional[int] = 0 , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : Optional[int] = 2 , **lowerCAmelCase__ : List[str] , ):
SCREAMING_SNAKE_CASE_: List[Any] = vocab_size
SCREAMING_SNAKE_CASE_: int = hidden_size
SCREAMING_SNAKE_CASE_: Any = encoder_ffn_dim
SCREAMING_SNAKE_CASE_: Tuple = num_encoder_layers
SCREAMING_SNAKE_CASE_: List[Any] = num_encoder_attention_heads
SCREAMING_SNAKE_CASE_: Dict = decoder_ffn_dim
SCREAMING_SNAKE_CASE_: Any = num_decoder_layers
SCREAMING_SNAKE_CASE_: Tuple = num_decoder_attention_heads
SCREAMING_SNAKE_CASE_: str = max_position_embeddings
SCREAMING_SNAKE_CASE_: str = init_std # Normal(0, this parameter)
SCREAMING_SNAKE_CASE_: Dict = activation_function
# parameters for xlmprophetnet
SCREAMING_SNAKE_CASE_: Optional[int] = ngram
SCREAMING_SNAKE_CASE_: Tuple = num_buckets
SCREAMING_SNAKE_CASE_: Union[str, Any] = relative_max_distance
SCREAMING_SNAKE_CASE_: List[str] = disable_ngram_loss
SCREAMING_SNAKE_CASE_: Dict = eps
# 3 Types of Dropout
SCREAMING_SNAKE_CASE_: Any = attention_dropout
SCREAMING_SNAKE_CASE_: Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE_: str = dropout
SCREAMING_SNAKE_CASE_: Optional[int] = use_cache
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , add_cross_attention=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Any):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`.")
| 671 | 0 |
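# The config above derives num_hidden_layers from the encoder and decoder depths
# and deliberately rejects direct assignment. A minimal sketch of that read-only
# computed-property pattern outside the transformers class hierarchy (class and
# field names here are illustrative):
class EncoderDecoderConfig:
    def __init__(self, num_encoder_layers=12, num_decoder_layers=12):
        self.num_encoder_layers = num_encoder_layers
        self.num_decoder_layers = num_decoder_layers
    @property
    def num_hidden_layers(self):
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("Set num_encoder_layers and num_decoder_layers instead.")
cfg = EncoderDecoderConfig(num_encoder_layers=6, num_decoder_layers=6)
assert cfg.num_hidden_layers == 12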
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Tuple = """▁"""
SCREAMING_SNAKE_CASE : Optional[int] = {"""vocab_file""": """sentencepiece.bpe.model"""}
SCREAMING_SNAKE_CASE : int = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
SCREAMING_SNAKE_CASE : Tuple = {
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
SCREAMING_SNAKE_CASE : Optional[int] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class _lowerCamelCase( UpperCAmelCase_ ):
lowercase_ : List[Any] = VOCAB_FILES_NAMES
lowercase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase_ : Any = ['''input_ids''', '''attention_mask''']
lowercase_ : List[int] = []
lowercase_ : List[int] = []
def __init__( self, lowerCamelCase, lowerCamelCase="<s>", lowerCamelCase="</s>", lowerCamelCase="</s>", lowerCamelCase="<s>", lowerCamelCase="<unk>", lowerCamelCase="<pad>", lowerCamelCase="<mask>", lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase = None, lowerCamelCase=None, lowerCamelCase=False, **lowerCamelCase, ) -> Optional[Any]:
"""simple docstring"""
_lowercase : List[Any] = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
_lowercase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowercase : Dict = legacy_behaviour
super().__init__(
bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, src_lang=lowerCAmelCase__, tgt_lang=lowerCAmelCase__, additional_special_tokens=lowerCAmelCase__, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=lowerCAmelCase__, **lowerCAmelCase__, )
_lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(lowerCAmelCase__))
_lowercase : List[Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowercase : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowercase : Dict = 1
_lowercase : Optional[Any] = len(self.sp_model)
_lowercase : Optional[int] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCAmelCase__)
}
_lowercase : List[Any] = {v: k for k, v in self.lang_code_to_id.items()}
_lowercase : Optional[int] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
_lowercase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
_lowercase : Tuple = list(self.lang_code_to_id.keys())
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens])
_lowercase : List[Any] = src_lang if src_lang is not None else "eng_Latn"
_lowercase : Any = self.lang_code_to_id[self._src_lang]
_lowercase : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang)
def __getstate__( self) -> Dict:
"""simple docstring"""
_lowercase : Any = self.__dict__.copy()
_lowercase : Union[str, Any] = None
_lowercase : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, lowerCamelCase) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs'):
_lowercase : List[Any] = {}
_lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
@property
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None, lowerCamelCase = False) -> List[str]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__, token_ids_a=lowerCAmelCase__, already_has_special_tokens=lowerCAmelCase__)
_lowercase : Dict = [1] * len(self.prefix_tokens)
_lowercase : Tuple = [1] * len(self.suffix_tokens)
if token_ids_a is None:
return prefix_ones + ([0] * len(lowerCAmelCase__)) + suffix_ones
return prefix_ones + ([0] * len(lowerCAmelCase__)) + ([0] * len(lowerCAmelCase__)) + suffix_ones
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> Dict:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> int:
"""simple docstring"""
_lowercase : str = [self.sep_token_id]
_lowercase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, **lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
_lowercase : str = src_lang
_lowercase : Tuple = self(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__)
_lowercase : Any = self.convert_tokens_to_ids(lowerCAmelCase__)
_lowercase : Optional[Any] = tgt_lang_id
return inputs
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Dict = {self.convert_ids_to_tokens(lowerCAmelCase__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
return self.sp_model.encode(lowerCAmelCase__, out_type=lowerCAmelCase__)
def UpperCamelCase ( self, lowerCamelCase) -> Optional[Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowercase : Dict = self.sp_model.PieceToId(lowerCAmelCase__)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = "".join(lowerCAmelCase__).replace(lowerCAmelCase__, ' ').strip()
return out_string
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = None) -> List[str]:
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
return
_lowercase : List[str] = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, lowerCAmelCase__)
elif not os.path.isfile(self.vocab_file):
with open(lowerCAmelCase__, 'wb') as fi:
_lowercase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase__)
return (out_vocab_file,)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase = "eng_Latn", lowerCamelCase = None, lowerCamelCase = "fra_Latn", **lowerCamelCase, ) -> Dict:
"""simple docstring"""
_lowercase : Dict = src_lang
_lowercase : Union[str, Any] = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang)
def UpperCamelCase ( self, lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
_lowercase : Any = []
_lowercase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase : str = [self.cur_lang_code]
_lowercase : str = [self.eos_token_id]
def UpperCamelCase ( self, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : str = self.lang_code_to_id[lang]
if self.legacy_behaviour:
_lowercase : Union[str, Any] = []
_lowercase : str = [self.eos_token_id, self.cur_lang_code]
else:
_lowercase : int = [self.cur_lang_code]
_lowercase : Any = [self.eos_token_id]
| 89 |
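# The token-to-id logic above reconciles two vocabularies: the first four fairseq
# specials are pinned, and every real SentencePiece id is shifted by a fixed
# offset because spm reserves id 0 for <unk> while fairseq does not. A tiny
# sketch of just that remapping rule (it mirrors the alignment table in the
# comments above):
FAIRSEQ_SPECIAL_IDS = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
FAIRSEQ_OFFSET = 1
def spm_id_to_fairseq_id(spm_id, unk_token_id=3):
    # spm id 0 is <unk>; every other piece shifts up past the extra special
    return spm_id + FAIRSEQ_OFFSET if spm_id else unk_token_id
assert spm_id_to_fairseq_id(0) == 3  # <unk>
assert spm_id_to_fairseq_id(3) == 4  # first "real" spm piece ('an' in the table)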
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase : Dict = logging.get_logger(__name__)
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] = b.T
SCREAMING_SNAKE_CASE_: Dict = np.sum(np.square(_UpperCAmelCase ) , axis=1 )
SCREAMING_SNAKE_CASE_: Tuple = np.sum(np.square(_UpperCAmelCase ) , axis=0 )
SCREAMING_SNAKE_CASE_: List[Any] = np.matmul(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = aa[:, None] - 2 * ab + ba[None, :]
return d
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: int = x.reshape(-1 , 3 )
SCREAMING_SNAKE_CASE_: Tuple = squared_euclidean_distance(_UpperCAmelCase , _UpperCAmelCase )
return np.argmin(_UpperCAmelCase , axis=1 )
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : int = ['''pixel_values''']
def __init__( self : Tuple , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : List[str] , ):
super().__init__(**lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = size if size is not None else {"height": 256, "width": 256}
SCREAMING_SNAKE_CASE_: Tuple = get_size_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = np.array(lowerCAmelCase__) if clusters is not None else None
SCREAMING_SNAKE_CASE_: Dict = do_resize
SCREAMING_SNAKE_CASE_: str = size
SCREAMING_SNAKE_CASE_: List[Any] = resample
SCREAMING_SNAKE_CASE_: Optional[int] = do_normalize
SCREAMING_SNAKE_CASE_: Dict = do_color_quantize
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Dict[str, int] , lowerCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ : Optional[Any] , ):
SCREAMING_SNAKE_CASE_: List[str] = get_size_dict(lowerCAmelCase__)
if "height" not in size or "width" not in size:
raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}")
return resize(
lowerCAmelCase__ , size=(size["height"], size["width"]) , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , ):
SCREAMING_SNAKE_CASE_: str = rescale(image=lowerCAmelCase__ , scale=1 / 127.5 , data_format=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = image - 1
return image
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : ImageInput , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Dict[str, int] = None , lowerCAmelCase__ : PILImageResampling = None , lowerCAmelCase__ : bool = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[Union[List[List[int]], np.ndarray]] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **lowerCAmelCase__ : Union[str, Any] , ):
SCREAMING_SNAKE_CASE_: Tuple = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_: Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_: Dict = get_size_dict(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_: int = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_: List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE_: Tuple = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE_: Optional[int] = np.array(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = make_list_of_images(lowerCAmelCase__)
if not valid_images(lowerCAmelCase__):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True.")
if do_color_quantize and clusters is None:
raise ValueError("Clusters must be specified if do_color_quantize is True.")
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_: Union[str, Any] = [to_numpy_array(lowerCAmelCase__) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_: str = [self.normalize(image=lowerCAmelCase__) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE_: Any = [to_channel_dimension_format(lowerCAmelCase__ , ChannelDimension.LAST) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE_: List[Any] = np.array(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = color_quantize(lowerCAmelCase__ , lowerCAmelCase__).reshape(images.shape[:-1])
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE_: str = images.shape[0]
SCREAMING_SNAKE_CASE_: Tuple = images.reshape(lowerCAmelCase__ , -1)
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE_: str = list(lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Dict = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__) for image in images]
SCREAMING_SNAKE_CASE_: Optional[Any] = {"input_ids": images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__)
| 671 | 0 |
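# The color_quantize step above maps each RGB pixel to its nearest palette entry
# using the expanded squared euclidean distance ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2,
# exactly as in squared_euclidean_distance above. A self-contained NumPy sketch:
import numpy as np
def color_quantize_demo(pixels, clusters):
    x = pixels.reshape(-1, 3).astype(np.float64)
    c = clusters.astype(np.float64)
    # pairwise squared distances in one vectorized pass
    d = np.sum(x**2, axis=1)[:, None] - 2 * x @ c.T + np.sum(c**2, axis=1)[None, :]
    return np.argmin(d, axis=1)
palette = np.array([[0, 0, 0], [255, 255, 255]])     # two "clusters"
image = np.array([[[10, 10, 10], [250, 240, 245]]])  # a 1x2 RGB image
print(color_quantize_demo(image, palette))           # [0 1]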
# Lint as: python3
import itertools
import os
import re
lowerCamelCase__ = re.compile(R"([A-Z]+)([A-Z][a-z])")
lowerCamelCase__ = re.compile(R"([a-z\d])([A-Z])")
lowerCamelCase__ = re.compile(R"(?<!_)_(?!_)")
lowerCamelCase__ = re.compile(R"(_{2,})")
lowerCamelCase__ = R"""^\w+(\.\w+)*$"""
lowerCamelCase__ = R"""<>:/\|?*"""
def __A(lowerCAmelCase ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = _uppercase_uppercase_re.sub(R"""\1_\2""" , _UpperCAmelCase )
_UpperCamelCase = _lowercase_uppercase_re.sub(R"""\1_\2""" , _UpperCAmelCase )
return name.lower()
def __A(lowerCAmelCase ) -> Dict:
"""simple docstring"""
_UpperCamelCase = _single_underscore_re.split(_UpperCAmelCase )
    _UpperCamelCase = [_multiple_underscores_re.split(n ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(_UpperCAmelCase ) if n != """""" )
def __A(lowerCAmelCase ) -> Tuple:
"""simple docstring"""
if os.path.basename(_UpperCAmelCase ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
return camelcase_to_snakecase(_UpperCAmelCase )
def __A(lowerCAmelCase , lowerCAmelCase ) -> Tuple:
"""simple docstring"""
if os.path.basename(_UpperCAmelCase ) != name:
raise ValueError(F'Should be a dataset name, not a path: {name}' )
if not re.match(_split_re , _UpperCAmelCase ):
        raise ValueError(F'Split name should match \'{_split_re}\' but got \'{split}\'.' )
return F'{filename_prefix_for_name(_UpperCAmelCase )}-{split}'
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = filename_prefix_for_split(_UpperCAmelCase , _UpperCAmelCase )
if filetype_suffix:
prefix += F'.{filetype_suffix}'
_UpperCamelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
return F'{filepath}*'
def __A(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[Any]:
"""simple docstring"""
_UpperCamelCase = filename_prefix_for_split(_UpperCAmelCase , _UpperCAmelCase )
_UpperCamelCase = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if shard_lengths:
_UpperCamelCase = len(_UpperCAmelCase )
_UpperCamelCase = [F'{prefix}-{shard_id:05d}-of-{num_shards:05d}' for shard_id in range(_UpperCAmelCase )]
if filetype_suffix:
_UpperCamelCase = [filename + F'.{filetype_suffix}' for filename in filenames]
return filenames
else:
_UpperCamelCase = prefix
if filetype_suffix:
filename += F'.{filetype_suffix}'
return [filename]
| 612 |
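# The naming helpers above lean on two regexes to split camelCase at the
# upper/lower boundaries before lowercasing. A compact, runnable restatement of
# camelcase_to_snakecase with the same patterns:
import re
_UPPER_UPPER = re.compile(r"([A-Z]+)([A-Z][a-z])")
_LOWER_UPPER = re.compile(r"([a-z\d])([A-Z])")
def camelcase_to_snakecase_demo(name):
    name = _UPPER_UPPER.sub(r"\1_\2", name)  # split runs of capitals: HTMLParser -> HTML_Parser
    name = _LOWER_UPPER.sub(r"\1_\2", name)  # split lower-to-upper edges: CamelCase -> Camel_Case
    return name.lower()
assert camelcase_to_snakecase_demo("SimpleCamelCase") == "simple_camel_case"
assert camelcase_to_snakecase_demo("HTMLParser") == "html_parser"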
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
lowerCAmelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Tuple = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : Union[str, Any] = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : List[str] = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
lowerCAmelCase : int = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
lowerCAmelCase : int = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
lowerCAmelCase : List[Any] = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
lowerCAmelCase : Optional[int] = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : Optional[int] = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
lowerCAmelCase : List[str] = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : List[Any] = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Any = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase : List[Any] = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
lowerCAmelCase : Optional[Any] = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
lowerCAmelCase : int = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(UpperCAmelCase_ )
class __lowercase :
"""simple docstring"""
def __call__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Optional[str] = None , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Union[bool, str] = False , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Union[str, TensorType]] = None , lowerCAmelCase__ : Optional[bool] = None , **lowerCAmelCase__ : Tuple , ):
if titles is None and texts is None:
return super().__call__(
lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE_: List[str] = titles if texts is None else texts
return super().__call__(
lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Optional[int] = titles if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [titles]
SCREAMING_SNAKE_CASE_: int = texts if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [texts]
SCREAMING_SNAKE_CASE_: str = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = questions if not isinstance(lowerCAmelCase__ , lowerCAmelCase__) else [questions] * n_passages
if len(lowerCAmelCase__) != len(lowerCAmelCase__):
raise ValueError(
F"There should be as many titles than texts but got {len(lowerCAmelCase__)} titles and {len(lowerCAmelCase__)} texts.")
SCREAMING_SNAKE_CASE_: Optional[Any] = super().__call__(lowerCAmelCase__ , lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
SCREAMING_SNAKE_CASE_: Union[str, Any] = super().__call__(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__)["input_ids"]
SCREAMING_SNAKE_CASE_: int = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(lowerCAmelCase__ , lowerCAmelCase__)
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE_: Dict = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
SCREAMING_SNAKE_CASE_: int = attention_mask
return self.pad(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : BatchEncoding , lowerCAmelCase__ : DPRReaderOutput , lowerCAmelCase__ : int = 16 , lowerCAmelCase__ : int = 64 , lowerCAmelCase__ : int = 4 , ):
SCREAMING_SNAKE_CASE_: int = reader_input["input_ids"]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = reader_output[:3]
SCREAMING_SNAKE_CASE_: Tuple = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(range(lowerCAmelCase__) , reverse=lowerCAmelCase__ , key=relevance_logits.__getitem__)
SCREAMING_SNAKE_CASE_: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE_: Optional[int] = list(input_ids[doc_id])
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE_: str = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE_: List[Any] = sequence_ids.index(self.pad_token_id)
else:
SCREAMING_SNAKE_CASE_: Dict = len(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowerCAmelCase__ , top_spans=lowerCAmelCase__ , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowerCAmelCase__ , start_index=lowerCAmelCase__ , end_index=lowerCAmelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
if len(lowerCAmelCase__) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , ):
SCREAMING_SNAKE_CASE_: Any = []
for start_index, start_score in enumerate(lowerCAmelCase__):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
scores.append(((start_index, start_index + answer_length), start_score + end_score))
        SCREAMING_SNAKE_CASE_: Union[str, Any] = sorted(lowerCAmelCase__ , key=lambda x: x[1] , reverse=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]")
SCREAMING_SNAKE_CASE_: int = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(F"Span is too long: {length} > {max_answer_length}")
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals):
continue
chosen_span_intervals.append((start_index, end_index))
if len(lowerCAmelCase__) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(UpperCAmelCase_ )
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Any = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[Any] = READER_PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : int = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Optional[int] = READER_PRETRAINED_INIT_CONFIGURATION
_UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
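# A minimal, de-obfuscated sketch of the greedy span-selection logic implemented
# above (the helper name `get_best_spans` and the plain variable names are
# readability assumptions, not the library's actual identifiers):
def get_best_spans(start_logits, end_logits, max_answer_length, top_spans):
    # Score every candidate span no longer than max_answer_length tokens.
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + length), start_score + end_score))
    # Greedily keep the highest-scoring spans, skipping any span that contains
    # or is contained in an already-chosen span.
    scores = sorted(scores, key=lambda item: item[1], reverse=True)
    chosen = []
    for (start_index, end_index), _score in scores:
        if any(
            start_index <= s <= e <= end_index or s <= start_index <= end_index <= e
            for s, e in chosen
        ):
            continue
        chosen.append((start_index, end_index))
        if len(chosen) == top_spans:
            break
    return chosen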
| 671 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def __lowerCamelCase ( __UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = botoa.client("iam" )
lowerCAmelCase_ : List[Any] = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=_UpperCAmelCase , AssumeRolePolicyDocument=json.dumps(_UpperCAmelCase , indent=2 ) )
lowerCAmelCase_ : Dict = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=_UpperCAmelCase , PolicyName=f'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(_UpperCAmelCase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'''role {role_name} already exists. Using existing one''' )
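# A hedged usage sketch for the helper above (the role name matches the default
# used later in this file; requires AWS credentials with IAM write permissions,
# and assumes the helper's original name was `_create_iam_role_for_sagemaker`):
# _create_iam_role_for_sagemaker("accelerate_sagemaker_execution_role")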
def __lowerCamelCase ( __UpperCamelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase_ : List[str] = botoa.client("iam" )
return iam_client.get_role(RoleName=_UpperCAmelCase )["Role"]["Arn"]
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
lowerCAmelCase_ : List[str] = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , _UpperCAmelCase , )
lowerCAmelCase_ : Dict = None
if credentials_configuration == 0:
lowerCAmelCase_ : Union[str, Any] = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
lowerCAmelCase_ : int = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
lowerCAmelCase_ : List[str] = _ask_field("AWS Access Key ID: " )
lowerCAmelCase_ : Optional[int] = aws_access_key_id
lowerCAmelCase_ : Union[str, Any] = _ask_field("AWS Secret Access Key: " )
lowerCAmelCase_ : Union[str, Any] = aws_secret_access_key
lowerCAmelCase_ : str = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
lowerCAmelCase_ : Any = aws_region
lowerCAmelCase_ : Dict = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , _UpperCAmelCase , )
if role_management == 0:
lowerCAmelCase_ : Tuple = _ask_field("Enter your IAM role name: " )
else:
lowerCAmelCase_ : str = "accelerate_sagemaker_execution_role"
        print(f'''Accelerate will create an IAM role \"{iam_role_name}\" using the provided credentials''' )
_create_iam_role_for_sagemaker(_UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
lowerCAmelCase_ : str = None
if is_custom_docker_image:
lowerCAmelCase_ : Dict = _ask_field("Enter your Docker image: " , lambda __UpperCamelCase : str(_UpperCAmelCase ).lower() )
lowerCAmelCase_ : List[str] = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
lowerCAmelCase_ : List[str] = None
if is_sagemaker_inputs_enabled:
lowerCAmelCase_ : str = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda __UpperCamelCase : str(_UpperCAmelCase ).lower() , )
lowerCAmelCase_ : Dict = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
lowerCAmelCase_ : Any = None
if is_sagemaker_metrics_enabled:
lowerCAmelCase_ : Union[str, Any] = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda __UpperCamelCase : str(_UpperCAmelCase ).lower() , )
lowerCAmelCase_ : List[Any] = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : str = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
if use_dynamo:
lowerCAmelCase_ : str = "dynamo_"
lowerCAmelCase_ : Optional[int] = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
lowerCAmelCase_ : Tuple = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
if use_custom_options:
lowerCAmelCase_ : Optional[Any] = _ask_options(
"Which mode do you want to use?" , _UpperCAmelCase , lambda __UpperCamelCase : TORCH_DYNAMO_MODES[int(_UpperCAmelCase )] , default="default" , )
lowerCAmelCase_ : Any = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
lowerCAmelCase_ : Optional[Any] = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=_UpperCAmelCase , error_message="Please enter yes or no." , )
lowerCAmelCase_ : List[Any] = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
lowerCAmelCase_ : Union[str, Any] = _ask_options(
            _UpperCAmelCase , _UpperCAmelCase , lambda x : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
        lowerCAmelCase_ : Tuple = _ask_field(_UpperCAmelCase , lambda x : str(x).lower() , default="ml.p3.2xlarge" )
lowerCAmelCase_ : Dict = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
lowerCAmelCase_ : List[Any] = _ask_field(
"How many machines do you want use? [1]: " , _UpperCAmelCase , default=1 , )
lowerCAmelCase_ : Optional[Any] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
        image_uri=_UpperCAmelCase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=_UpperCAmelCase , use_cpu=_UpperCAmelCase , dynamo_config=_UpperCAmelCase , ec2_instance_type=_UpperCAmelCase , profile=_UpperCAmelCase , region=_UpperCAmelCase , iam_role_name=_UpperCAmelCase , mixed_precision=_UpperCAmelCase , num_machines=_UpperCAmelCase , sagemaker_inputs_file=_UpperCAmelCase , sagemaker_metrics_file=_UpperCAmelCase , )
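# For context, a plausible sketch of the `_convert_yes_no_to_bool` converter
# used by the prompts above (the real implementation lives in `.config_utils`
# and is not shown here; this is an assumption, not the library's code):
def convert_yes_no_to_bool(value: str) -> bool:
    # Raises KeyError for anything other than yes/no, which the prompt loop
    # surfaces via its error_message.
    return {"yes": True, "no": False}[value.lower()]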
| 610 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = DistilBertTokenizer
_UpperCAmelCase : Union[str, Any] = DistilBertTokenizerFast
_UpperCAmelCase : int = True
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[Any] = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
SCREAMING_SNAKE_CASE_: Any = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
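    # A sketch of the layout the assertions above verify, for any BERT-style
    # tokenizer (downloads the checkpoint on first use, hence left commented):
    # from transformers import DistilBertTokenizer
    # tok = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    # ids = tok.build_inputs_with_special_tokens([100, 200])
    # assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id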
| 671 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( UpperCAmelCase_,unittest.TestCase ):
'''simple docstring'''
a_ : Optional[int] = AudioLDMPipeline
a_ : List[Any] = TEXT_TO_AUDIO_PARAMS
a_ : Any = TEXT_TO_AUDIO_BATCH_PARAMS
a_ : Optional[int] = frozenset(
[
"num_inference_steps",
"num_waveforms_per_prompt",
"generator",
"latents",
"output_type",
"return_dict",
"callback",
"callback_steps",
] )
def _snake_case ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
        __lowerCamelCase : str = UNet2DConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(3_2, 6_4) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=lowerCAmelCase__ , )
__lowerCamelCase : Dict = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowerCAmelCase__ , set_alpha_to_one=lowerCAmelCase__ , )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__lowerCamelCase : Dict = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
__lowerCamelCase : List[Any] = ClapTextModelWithProjection(lowerCAmelCase__ )
__lowerCamelCase : List[Any] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=7_7 )
        __lowerCamelCase : Dict = SpeechT5HifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=lowerCAmelCase__ , )
        __lowerCamelCase : Any = SpeechT5HifiGan(lowerCAmelCase__ )
__lowerCamelCase : Union[str, Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def _snake_case ( self : List[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Dict=0 ):
'''simple docstring'''
if str(lowerCAmelCase__ ).startswith("""mps""" ):
__lowerCamelCase : Dict = torch.manual_seed(lowerCAmelCase__ )
else:
__lowerCamelCase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__lowerCamelCase : Union[str, Any] = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _snake_case ( self : List[str] ):
'''simple docstring'''
__lowerCamelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : List[str] = AudioLDMPipeline(**lowerCAmelCase__ )
__lowerCamelCase : Optional[int] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : str = self.get_dummy_inputs(lowerCAmelCase__ )
__lowerCamelCase : Optional[Any] = audioldm_pipe(**lowerCAmelCase__ )
__lowerCamelCase : Dict = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 2_5_6
__lowerCamelCase : Dict = audio[:1_0]
__lowerCamelCase : List[Any] = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Tuple = AudioLDMPipeline(**lowerCAmelCase__ )
__lowerCamelCase : Union[str, Any] = audioldm_pipe.to(lowerCAmelCase__ )
__lowerCamelCase : Union[str, Any] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
__lowerCamelCase : Any = 3 * [inputs["prompt"]]
# forward
__lowerCamelCase : List[Any] = audioldm_pipe(**lowerCAmelCase__ )
        audio_1 = output.audios[0]
__lowerCamelCase : Any = self.get_dummy_inputs(lowerCAmelCase__ )
__lowerCamelCase : int = 3 * [inputs.pop("""prompt""" )]
__lowerCamelCase : Tuple = audioldm_pipe.tokenizer(
lowerCAmelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="""pt""" , )
__lowerCamelCase : List[Any] = text_inputs["input_ids"].to(lowerCAmelCase__ )
__lowerCamelCase : Union[str, Any] = audioldm_pipe.text_encoder(
lowerCAmelCase__ , )
__lowerCamelCase : Dict = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__lowerCamelCase : str = F.normalize(lowerCAmelCase__ , dim=-1 )
__lowerCamelCase : Any = prompt_embeds
# forward
__lowerCamelCase : Optional[Any] = audioldm_pipe(**lowerCAmelCase__ )
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1E-2
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = AudioLDMPipeline(**lowerCAmelCase__ )
__lowerCamelCase : Dict = audioldm_pipe.to(lowerCAmelCase__ )
__lowerCamelCase : List[str] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : Tuple = self.get_dummy_inputs(lowerCAmelCase__ )
__lowerCamelCase : int = 3 * ["this is a negative prompt"]
__lowerCamelCase : str = negative_prompt
__lowerCamelCase : Optional[int] = 3 * [inputs["prompt"]]
# forward
__lowerCamelCase : Tuple = audioldm_pipe(**lowerCAmelCase__ )
        audio_1 = output.audios[0]
__lowerCamelCase : str = self.get_dummy_inputs(lowerCAmelCase__ )
__lowerCamelCase : Optional[Any] = 3 * [inputs.pop("""prompt""" )]
__lowerCamelCase : Optional[Any] = []
for p in [prompt, negative_prompt]:
__lowerCamelCase : List[Any] = audioldm_pipe.tokenizer(
lowerCAmelCase__ , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=lowerCAmelCase__ , return_tensors="""pt""" , )
__lowerCamelCase : Dict = text_inputs["input_ids"].to(lowerCAmelCase__ )
__lowerCamelCase : List[Any] = audioldm_pipe.text_encoder(
lowerCAmelCase__ , )
__lowerCamelCase : Any = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__lowerCamelCase : Optional[Any] = F.normalize(lowerCAmelCase__ , dim=-1 )
embeds.append(lowerCAmelCase__ )
__lowerCamelCase : Optional[Any] = embeds
# forward
__lowerCamelCase : List[Any] = audioldm_pipe(**lowerCAmelCase__ )
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2 ).max() < 1E-2
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
__lowerCamelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Any = self.get_dummy_components()
__lowerCamelCase : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
__lowerCamelCase : Optional[int] = AudioLDMPipeline(**lowerCAmelCase__ )
__lowerCamelCase : Any = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : int = self.get_dummy_inputs(lowerCAmelCase__ )
__lowerCamelCase : Dict = "egg cracking"
__lowerCamelCase : Dict = audioldm_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
__lowerCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 2_5_6
__lowerCamelCase : Optional[int] = audio[:1_0]
__lowerCamelCase : List[str] = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _snake_case ( self : Dict ):
'''simple docstring'''
__lowerCamelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : Union[str, Any] = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
__lowerCamelCase : int = AudioLDMPipeline(**lowerCAmelCase__ )
__lowerCamelCase : int = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : Dict = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
__lowerCamelCase : str = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__lowerCamelCase : Tuple = 2
__lowerCamelCase : Tuple = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
__lowerCamelCase : str = 2
__lowerCamelCase : Union[str, Any] = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase__ ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
__lowerCamelCase : int = 2
__lowerCamelCase : Optional[Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=lowerCAmelCase__ ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowerCamelCase : List[str] = self.get_dummy_components()
__lowerCamelCase : int = AudioLDMPipeline(**lowerCAmelCase__ )
__lowerCamelCase : List[str] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : Optional[Any] = audioldm_pipe.vocoder.config.sampling_rate
__lowerCamelCase : List[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
__lowerCamelCase : Tuple = audioldm_pipe(audio_length_in_s=0.016 , **lowerCAmelCase__ )
__lowerCamelCase : str = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) / vocoder_sampling_rate == 0.016
__lowerCamelCase : Union[str, Any] = audioldm_pipe(audio_length_in_s=0.032 , **lowerCAmelCase__ )
__lowerCamelCase : Dict = output.audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) / vocoder_sampling_rate == 0.032
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : Dict = self.get_dummy_components()
__lowerCamelCase : Union[str, Any] = AudioLDMPipeline(**lowerCAmelCase__ )
__lowerCamelCase : List[Any] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : str = ["hey"]
__lowerCamelCase : Tuple = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=1 )
__lowerCamelCase : List[str] = output.audios.shape
assert audio_shape == (1, 2_5_6)
__lowerCamelCase : str = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
        __lowerCamelCase : Optional[int] = SpeechT5HifiGan(lowerCAmelCase__ ).to(lowerCAmelCase__ )
__lowerCamelCase : str = audioldm_pipe(lowerCAmelCase__ , num_inference_steps=1 )
__lowerCamelCase : Any = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def _snake_case ( self : Any ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase__ )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=lowerCAmelCase__ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _snake_case ( self : List[str] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase__ )
@slow
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def _snake_case ( self : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int="cpu" , _lowerCamelCase : List[Any]=torch.float32 , _lowerCamelCase : str=0 ):
'''simple docstring'''
__lowerCamelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
__lowerCamelCase : Dict = np.random.RandomState(lowerCAmelCase__ ).standard_normal((1, 8, 1_2_8, 1_6) )
__lowerCamelCase : Optional[int] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ , dtype=lowerCAmelCase__ )
__lowerCamelCase : Optional[Any] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__lowerCamelCase : List[Any] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : List[Any] = self.get_inputs(lowerCAmelCase__ )
__lowerCamelCase : str = 2_5
__lowerCamelCase : str = audioldm_pipe(**lowerCAmelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 8_1_9_2_0
__lowerCamelCase : Union[str, Any] = audio[7_7_2_3_0:7_7_2_4_0]
__lowerCamelCase : Any = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
__lowerCamelCase : Union[str, Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def _snake_case ( self : Any ):
'''simple docstring'''
__lowerCamelCase : Any = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__lowerCamelCase : int = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__lowerCamelCase : Union[str, Any] = audioldm_pipe.to(lowerCAmelCase__ )
audioldm_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__lowerCamelCase : int = self.get_inputs(lowerCAmelCase__ )
__lowerCamelCase : Optional[int] = audioldm_pipe(**lowerCAmelCase__ ).audios[0]
assert audio.ndim == 1
assert len(lowerCAmelCase__ ) == 8_1_9_2_0
__lowerCamelCase : Union[str, Any] = audio[2_7_7_8_0:2_7_7_9_0]
__lowerCamelCase : Tuple = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
__lowerCamelCase : str = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 519 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase : List[Any] = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""])
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[int] = test_results.split(" " )
SCREAMING_SNAKE_CASE_: Tuple = 0
SCREAMING_SNAKE_CASE_: str = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
SCREAMING_SNAKE_CASE_: Optional[Any] = expressions[-2] if "=" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_UpperCAmelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
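# Illustrative behaviour of the parser above on a typical pytest summary line:
#   "1 failed, 2 passed in 12.34s ==="  ->  failed == 1, success == 2,
#                                           time_spent == "12.34s"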
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: Any = None
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
for line in failures_short_lines.split("\n" ):
if re.search(R"_ \[doctest\]" , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = True
SCREAMING_SNAKE_CASE_: Dict = line.split(" " )[2]
elif in_error and not line.split(" " )[0].isdigit():
SCREAMING_SNAKE_CASE_: Union[str, Any] = line
SCREAMING_SNAKE_CASE_: List[str] = False
return failures
class __lowercase :
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Dict = title
SCREAMING_SNAKE_CASE_: int = doc_test_results["time_spent"].split(",")[0]
SCREAMING_SNAKE_CASE_: int = doc_test_results["success"]
SCREAMING_SNAKE_CASE_: Optional[Any] = doc_test_results["failures"]
SCREAMING_SNAKE_CASE_: Any = self.n_success + self.n_failures
# Failures and success of the modeling tests
SCREAMING_SNAKE_CASE_: Optional[int] = doc_test_results
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: int = [self._time_spent]
SCREAMING_SNAKE_CASE_: List[Any] = 0
for time in time_spent:
SCREAMING_SNAKE_CASE_: Union[str, Any] = time.split(":")
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase__) == 1:
SCREAMING_SNAKE_CASE_: Dict = [0, 0, time_parts[0]]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: int = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
total_secs += hours * 3600 + minutes * 60 + seconds
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
return F"{int(lowerCAmelCase__)}h{int(lowerCAmelCase__)}m{int(lowerCAmelCase__)}s"
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
F" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[Any] = 40
SCREAMING_SNAKE_CASE_: List[str] = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase__ , lowerCAmelCase__)}
SCREAMING_SNAKE_CASE_: Tuple = ""
for category, failures in category_failures.items():
if len(lowerCAmelCase__) == 0:
continue
if report != "":
report += "\n\n"
report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase__)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Optional[Any] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(lowerCAmelCase__)
@staticmethod
def _SCREAMING_SNAKE_CASE ( ):
SCREAMING_SNAKE_CASE_: List[str] = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(lowerCAmelCase__)}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : Tuple):
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(self.payload)}))
SCREAMING_SNAKE_CASE_: Optional[Any] = F"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
SCREAMING_SNAKE_CASE_: List[Any] = client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , blocks=self.payload , text=lowerCAmelCase__ , )
def _SCREAMING_SNAKE_CASE ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = ""
for key, value in failures.items():
SCREAMING_SNAKE_CASE_: str = value[:200] + " [Truncated]" if len(lowerCAmelCase__) > 250 else value
failures_text += F"*{key}*\n_{value}_\n\n"
SCREAMING_SNAKE_CASE_: Any = job_name
SCREAMING_SNAKE_CASE_: List[Any] = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
if job_link is not None:
SCREAMING_SNAKE_CASE_: Tuple = {
"type": "button",
"text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
"url": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _SCREAMING_SNAKE_CASE ( self : Any):
if self.thread_ts is None:
raise ValueError("Can only post reply if a post has been made.")
SCREAMING_SNAKE_CASE_: Tuple = self.doc_test_results.pop("job_link")
self.doc_test_results.pop("failures")
self.doc_test_results.pop("success")
self.doc_test_results.pop("time_spent")
        SCREAMING_SNAKE_CASE_: Any = sorted(self.doc_test_results.items() , key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result["failures"]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = F"*Num failures* :{len(job_result['failed'])} \n"
SCREAMING_SNAKE_CASE_: Optional[Any] = job_result["failures"]
SCREAMING_SNAKE_CASE_: Optional[Any] = self.get_reply_blocks(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , text=lowerCAmelCase__)
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text=F"Results for {job}" , blocks=lowerCAmelCase__ , thread_ts=self.thread_ts["ts"] , )
time.sleep(1)
def A_ ( ):
SCREAMING_SNAKE_CASE_: Tuple = os.environ["GITHUB_RUN_ID"]
SCREAMING_SNAKE_CASE_: Any = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
SCREAMING_SNAKE_CASE_: List[Any] = requests.get(_UpperCAmelCase ).json()
SCREAMING_SNAKE_CASE_: Optional[Any] = {}
try:
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
SCREAMING_SNAKE_CASE_: Any = math.ceil((result["total_count"] - 1_00) / 1_00 )
for i in range(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = requests.get(url + f"&page={i + 2}" ).json()
jobs.update({job["name"]: job["html_url"] for job in result["jobs"]} )
return jobs
except Exception as e:
print("Unknown error, could not fetch links." , _UpperCAmelCase )
return {}
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = {}
if os.path.exists(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[str] = os.listdir(_UpperCAmelCase )
for file in files:
try:
with open(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , encoding="utf-8" ) as f:
SCREAMING_SNAKE_CASE_: Dict = f.read()
except UnicodeDecodeError as e:
raise ValueError(f"Could not open {os.path.join(_UpperCAmelCase , _UpperCAmelCase )}." ) from e
return _artifact
def A_ ( ):
class __lowercase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : str):
SCREAMING_SNAKE_CASE_: Dict = name
SCREAMING_SNAKE_CASE_: List[str] = []
def __str__( self : Optional[Any]):
return self.name
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : str):
self.paths.append({"name": self.name, "path": path})
SCREAMING_SNAKE_CASE_: Dict[str, Artifact] = {}
SCREAMING_SNAKE_CASE_: List[Any] = filter(os.path.isdir , os.listdir() )
for directory in directories:
SCREAMING_SNAKE_CASE_: Dict = directory
if artifact_name not in _available_artifacts:
SCREAMING_SNAKE_CASE_: Tuple = Artifact(_UpperCAmelCase )
_available_artifacts[artifact_name].add_path(_UpperCAmelCase )
return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase : Tuple = get_job_links()
lowerCAmelCase : Optional[Any] = retrieve_available_artifacts()
lowerCAmelCase : Any = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase : int = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""")
lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""])
lowerCAmelCase : List[str] = failed
lowerCAmelCase : Any = success
lowerCAmelCase : Dict = time_spent[1:-1] + """, """
lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""")
lowerCAmelCase : str = line.split()[0].replace("""\n""", """""")
if "::" in line:
lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""")
else:
lowerCAmelCase , lowerCAmelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A"""
lowerCAmelCase : Any = failure
break
lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 671 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ : Tuple = 500_000
lowerCamelCase__ : Optional[Any] = os.path.split(__file__)
lowerCamelCase__ : Dict = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def UpperCAmelCase_ ( __UpperCAmelCase : str , **__UpperCAmelCase : List[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = dataset.map(**_UpperCAmelCase )
@get_duration
def UpperCAmelCase_ ( __UpperCAmelCase : Tuple , **__UpperCAmelCase : Dict ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = dataset.filter(**_UpperCAmelCase )
def UpperCAmelCase_ ( ) -> int:
SCREAMING_SNAKE_CASE_ = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
SCREAMING_SNAKE_CASE_ = generate_example_dataset(
os.path.join(_UpperCAmelCase , 'dataset.arrow' ) , _UpperCAmelCase , num_examples=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCAmelCase )
    def tokenize(examples ):
return tokenizer(examples['text'] )
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase , batched=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase , function=lambda __UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='numpy' ):
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase , function=lambda __UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='pandas' ):
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase , function=lambda __UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='torch' , columns='numbers' ):
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase , function=lambda __UpperCAmelCase : None , batched=_UpperCAmelCase )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase , function=lambda __UpperCAmelCase : None , batched=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = map(_UpperCAmelCase , function=_UpperCAmelCase , batched=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = filter(_UpperCAmelCase )
# Activate later when tokenizer support batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(_UpperCAmelCase , 'wb' ) as f:
f.write(json.dumps(_UpperCAmelCase ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter() | 31 |
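# `get_duration` is imported from a local `utils` module that is not part of
# this file; a plausible sketch of what such a decorator does (an assumption,
# not the benchmark suite's actual code):
import functools
import timeit

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        # Return elapsed wall-clock seconds instead of the function's result.
        return timeit.default_timer() - start
    return wrapper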
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase : str = 16
lowerCAmelCase : List[Any] = 32
def A_ ( _UpperCAmelCase , _UpperCAmelCase = 16 ):
SCREAMING_SNAKE_CASE_: List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_: Tuple = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_UpperCAmelCase , max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: str = datasets.map(
_UpperCAmelCase , batched=_UpperCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_: Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(_UpperCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_: List[Any] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_: Tuple = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_: int = 8
else:
SCREAMING_SNAKE_CASE_: Any = None
return tokenizer.pad(
_UpperCAmelCase , padding="longest" , max_length=_UpperCAmelCase , pad_to_multiple_of=_UpperCAmelCase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Optional[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Tuple = DataLoader(
tokenized_datasets["validation"] , shuffle=_UpperCAmelCase , collate_fn=_UpperCAmelCase , batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase : Optional[int] = mocked_dataloaders # noqa: F811
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _UpperCAmelCase ) == "1":
SCREAMING_SNAKE_CASE_: Tuple = 2
# New Code #
SCREAMING_SNAKE_CASE_: List[str] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE_: int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: Tuple = config["lr"]
SCREAMING_SNAKE_CASE_: List[str] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE_: List[str] = int(config["seed"] )
SCREAMING_SNAKE_CASE_: Optional[int] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE_: str = evaluate.load("glue" , "mrpc" )
set_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_: Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_: List[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: Union[str, Any] = AdamW(params=model.parameters() , lr=_UpperCAmelCase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_: str = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase , num_warmup_steps=1_00 , num_training_steps=(len(_UpperCAmelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = accelerator.prepare(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = output.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[Any] = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=_UpperCAmelCase , references=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE_: List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , _UpperCAmelCase )
def A_ ( ):
SCREAMING_SNAKE_CASE_: str = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_UpperCAmelCase , default=_UpperCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=_UpperCAmelCase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE_: List[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE_: Tuple = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_UpperCAmelCase , _UpperCAmelCase )
if __name__ == "__main__":
main()
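# Conceptually, the `accelerator.accumulate(model)` context used above is
# roughly equivalent to the manual pattern below (a simplified sketch, not
# Accelerate's implementation; it omits gradient synchronisation details):
#
#   loss = model(**batch).loss / gradient_accumulation_steps
#   loss.backward()
#   if (step + 1) % gradient_accumulation_steps == 0:
#       optimizer.step()
#       optimizer.zero_grad()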
| 671 | 0 |
"""simple docstring"""
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class lowerCAmelCase__ ( UpperCAmelCase_ ):
"""simple docstring"""
__UpperCAmelCase : Tuple = CustomTokenizer
pass
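    # A hedged usage sketch: pairing the fast tokenizer above with its slow
    # counterpart so `AutoTokenizer` can resolve them (`CustomConfig` and
    # `CustomTokenizerFast` are placeholder names assumed for illustration):
    # from transformers import AutoTokenizer
    # AutoTokenizer.register(
    #     CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
    # )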
| 575 |
from math import asin, atan, cos, radians, sin, sqrt, tan
lowerCAmelCase : Union[str, Any] = 637_8137.0
lowerCAmelCase : int = 635_6752.31_4245
lowerCAmelCase : Union[str, Any] = 6378137
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: List[Any] = (AXIS_A - AXIS_B) / AXIS_A
SCREAMING_SNAKE_CASE_: str = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
SCREAMING_SNAKE_CASE_: Optional[int] = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) )
SCREAMING_SNAKE_CASE_: Any = radians(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Dict = radians(_UpperCAmelCase )
# Equation
SCREAMING_SNAKE_CASE_: str = sin((phi_a - phi_a) / 2 )
SCREAMING_SNAKE_CASE_: List[Any] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
SCREAMING_SNAKE_CASE_: Tuple = sqrt(sin_sq_phi + (cos(_UpperCAmelCase ) * cos(_UpperCAmelCase ) * sin_sq_lambda) )
return 2 * RADIUS * asin(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
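# Worked example (in its source repository the function above is named
# `haversine_distance`; the expected value is approximate):
# haversine_distance(37.774856, -122.424227, 37.864742, -119.537521)
# -> about 254_352 meters (San Francisco to Yosemite)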
| 671 | 0 |
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_a : List[str] = """\
@inproceedings{snover-etal-2006-study,
title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",
author = \"Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John\",
booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",
month = aug # \" 8-12\",
year = \"2006\",
address = \"Cambridge, Massachusetts, USA\",
publisher = \"Association for Machine Translation in the Americas\",
url = \"https://aclanthology.org/2006.amta-papers.25\",
pages = \"223--231\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_a : Any = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""
_a : str = """
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
'score' (float): TER score (num_edits / sum_ref_lengths * 100)
'num_edits' (int): The cumulative number of edits
'ref_length' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}
Example 2:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}
Example 3:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}
Example 4:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}
Example 5:
>>> predictions = [\"does this sentence match??\",
... \"what about this sentence?\",
... \"What did the TER metric user say to the developer?\"]
>>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],
... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],
... [\"Your jokes are...\", \"...TERrible\"]]
>>> ter = datasets.load_metric(\"ter\")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A (datasets.Metric ):
def _snake_case ( self ):
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
"https://github.com/jhclark/tercom",
] , )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 168 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
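# Example invocation (hypothetical script name and local paths, shown for
# illustration only):
#
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin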
| 671 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
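# Example invocation (hypothetical model and dataset identifiers, for
# illustration only; any Hub ASR checkpoint and speech dataset would do):
#
#     python eval.py --model_id some-org/wav2vec2-finetuned \
#         --dataset mozilla-foundation/common_voice_7_0 --config pt --split test \
#         --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs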
| 417 |
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
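# Quick sanity check of the diagonal logic above (an illustrative aside, not
# part of the solution): in a 7x7 number spiral, 8 of the 13 diagonal values
# are prime (3, 5, 7, 13, 17, 31, 37, 43) — the ~62% ratio quoted in
# Project Euler problem 58.
#
#     assert all(is_prime(n) for n in (3, 5, 7, 13, 17, 31, 37, 43))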
| 671 | 0 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
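# Illustration of the parsing above (a sketch): the count always precedes the
# "failed"/"passed" token, and a trailing "===" pushes the time one slot back.
#
#     assert handle_test_results("=== 4 failed, 120 passed in 4.32s ===") == (4, 120, "4.32s")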
def extract_first_line_failure(failures_short_lines):
    failures = {}
    error = None
    in_error = False

    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            error = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[error] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
__A = handle_test_results(artifact['''stats'''])
__A = failed
__A = success
__A = time_spent[1:-1] + """, """
__A = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
__A = line.replace('''FAILED ''', '''''')
__A = line.split()[0].replace('''\n''', '''''')
if "::" in line:
__A = line.split('''::''')
else:
__A = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
__A = docs[file_regex]
doc_test_results[category]["failed"].append(test)
__A = all_failures[test] if test in all_failures else """N/A"""
__A = failure
break
__A = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply() | 646 |
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
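# Illustrative behaviour of the converters above (function names as restored here):
#
#     to_pascal_case("one two 31235three4four")              # 'OneTwo31235three4four'
#     to_camel_case("one two 31235three4four")               # 'oneTwo31235three4four'
#     to_snake_case("one two 31235three4four", upper=True)   # 'ONE_TWO_31235THREE4FOUR'
#     to_kebab_case("one two 31235three4four", upper=False)  # 'one-two-31235three4four'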
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 671 | 0 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
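# Minimal demonstration of the shim above (a sketch; instantiation needs no
# arguments because BeitImageProcessor provides defaults for everything):
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         extractor = BeitFeatureExtractor()  # behaves exactly like BeitImageProcessor
#     assert any(issubclass(w.category, FutureWarning) for w in caught)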
| 668 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
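# Illustrative usage (a sketch): with no backbone_config the default ResNet
# backbone is instantiated, and to_dict() re-serializes the nested config.
#
#     config = UperNetConfig()
#     assert config.backbone_config.model_type == "resnet"
#     assert config.to_dict()["model_type"] == "upernet"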
| 671 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = """\
@inproceedings{popovic-2015-chrf,
title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",
month = sep,
year = \"2015\",
address = \"Lisbon, Portugal\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W15-3049\",
doi = \"10.18653/v1/W15-3049\",
pages = \"392--395\",
}
@inproceedings{popovic-2017-chrf,
title = \"chr{F}++: words helping character n-grams\",
author = \"Popovi{\'c}, Maja\",
booktitle = \"Proceedings of the Second Conference on Machine Translation\",
month = sep,
year = \"2017\",
address = \"Copenhagen, Denmark\",
publisher = \"Association for Computational Linguistics\",
url = \"https://aclanthology.org/W17-4770\",
doi = \"10.18653/v1/W17-4770\",
pages = \"612--618\",
}
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_DESCRIPTION = """\
ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,
and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation
that is already present in sacrebleu.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.
"""
_KWARGS_DESCRIPTION = """
Produces ChrF(++) scores for hypotheses given reference translations.
Args:
predictions (list of str): The predicted sentences.
references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.
char_order (int): Character n-gram order. Defaults to `6`.
word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.
beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.
lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.
whitespace (bool): If `True`, include whitespaces when extracting character n-grams.
eps_smoothing (bool): If `True`, applies epsilon smoothing similar
to reference chrF++.py, NLTK and Moses implementations. If `False`,
it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.
Returns:
'score' (float): The chrF (chrF++) score,
'char_order' (int): The character n-gram order,
'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,
'beta' (int): Determine the importance of recall w.r.t precision
Examples:
Example 1--a simple example of calculating chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction, references=reference)
>>> print(results)
{'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}
Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2)
>>> print(results)
{'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}
Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:
>>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]
>>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]
>>> chrf = datasets.load_metric(\"chrf\")
>>> results = chrf.compute(predictions=prediction,
... references=reference,
... word_order=2,
... lowercase=True)
>>> print(results)
{'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install \"sacrebleu>=1.4.12\"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] , reference_urls=[
'''https://github.com/m-popovic/chrF''',
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
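# Standalone sketch of what _compute does under the hood (assumes only that
# sacrebleu>=1.4.12 is installed):
#
#     from sacrebleu import CHRF
#     preds = ["The cat sat on the mat."]
#     refs = [["The cat sat on the mat."]]
#     transposed = [[r[i] for r in refs] for i in range(len(refs[0]))]
#     print(CHRF(word_order=2).corpus_score(preds, transposed).score)  # 100.0 for an exact match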
| 121 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
| 671 | 0 |
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how  \n Are yoU ?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_moses_pipeline(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 89 |
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
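# Worked numbers for the recurrence above (quoted from the Project Euler 115
# problem statement as a sanity check): with a minimum block length of 3,
# F(3, 29) = 673135 and F(3, 30) = 1001401, so the fill count first exceeds
# one million at n = 30 and solution(3) returns 30.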
| 671 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCamelCase__ = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
def A_ ( self ) -> Union[str, Any]:
'''simple docstring'''
import faiss
_UpperCamelCase = self._create_dummy_dataset()
_UpperCamelCase = dset.map(
lambda a , a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ )
_UpperCamelCase = dset.add_faiss_index("""vecs""" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
_UpperCamelCase = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
dset.drop_index("""vecs""" )
def A_ ( self ) -> Any:
'''simple docstring'''
import faiss
_UpperCamelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_UpperCamelCase = dset.get_nearest_examples("""vecs""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def A_ ( self ) -> Dict:
'''simple docstring'''
import faiss
_UpperCamelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase__ ) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
_UpperCamelCase = dset.get_nearest_examples("""vecs2""" , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
self.assertRaises(lowerCAmelCase__ , partial(dset.get_nearest_examples , """vecs2""" , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self ) -> int:
'''simple docstring'''
from elasticsearch import Elasticsearch
_UpperCamelCase = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
_UpperCamelCase = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
_UpperCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
_UpperCamelCase = Elasticsearch()
dset.add_elasticsearch_index("""filename""" , es_client=lowerCAmelCase__ )
_UpperCamelCase = dset.get_nearest_examples("""filename""" , """my_name-train_29""" )
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class FaissIndexTest(TestCase):
def A_ ( self ) -> Any:
'''simple docstring'''
import faiss
_UpperCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_UpperCamelCase = np.zeros(5 , dtype=np.floataa )
_UpperCamelCase = 1
_UpperCamelCase = index.search(lowerCAmelCase__ )
self.assertRaises(lowerCAmelCase__ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_UpperCamelCase = np.eye(5 , dtype=np.floataa )[::-1]
_UpperCamelCase = index.search_batch(lowerCAmelCase__ )
self.assertRaises(lowerCAmelCase__ , index.search_batch , queries[0] )
_UpperCamelCase = [scores[0] for scores in total_scores]
_UpperCamelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCAmelCase__ )
def A_ ( self ) -> Dict:
'''simple docstring'''
import faiss
_UpperCamelCase = FaissIndex(string_factory="""Flat""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_UpperCamelCase = FaissIndex(string_factory="""LSH""" )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCAmelCase__ ):
_UpperCamelCase = FaissIndex(string_factory="""Flat""" , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self ) -> str:
'''simple docstring'''
import faiss
_UpperCamelCase = faiss.IndexFlat(5 )
_UpperCamelCase = FaissIndex(custom_index=lowerCAmelCase__ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self ) -> int:
'''simple docstring'''
import faiss
_UpperCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCAmelCase__ ) as tmp_file:
index.save(tmp_file.name )
_UpperCamelCase = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_UpperCamelCase = np.zeros(5 , dtype=np.floataa )
_UpperCamelCase = 1
_UpperCamelCase = index.search(lowerCAmelCase__ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
import faiss
_UpperCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
_UpperCamelCase = "index.faiss"
_UpperCamelCase = F'mock://{index_name}'
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
_UpperCamelCase = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
_UpperCamelCase = np.zeros(5 , dtype=np.floataa )
_UpperCamelCase = 1
_UpperCamelCase = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
def A_ ( self ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
_UpperCamelCase = Elasticsearch()
_UpperCamelCase = {"acknowledged": True}
_UpperCamelCase = ElasticSearchIndex(es_client=lowerCAmelCase__ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(["""foo""", """bar""", """foobar"""] )
# single query
_UpperCamelCase = "foo"
_UpperCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCamelCase = index.search(lowerCAmelCase__ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_UpperCamelCase = "foo"
_UpperCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
_UpperCamelCase = index.search(lowerCAmelCase__ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_UpperCamelCase = ["foo", "bar", "foobar"]
_UpperCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCamelCase = index.search_batch(lowerCAmelCase__ )
_UpperCamelCase = [scores[0] for scores in total_scores]
_UpperCamelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase__ )
# batched queries with timeout
_UpperCamelCase = ["foo", "bar", "foobar"]
_UpperCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
_UpperCamelCase = index.search_batch(lowerCAmelCase__ , request_timeout=30 )
_UpperCamelCase = [scores[0] for scores in total_scores]
_UpperCamelCase = [indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCAmelCase__ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCAmelCase__ )
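# Minimal sketch of the FaissIndex round trip exercised above (assumes faiss
# and datasets are installed):
#
#     import numpy as np
#     from datasets.search import FaissIndex
#     index = FaissIndex(string_factory="Flat")
#     index.add_vectors(np.eye(5, dtype=np.float32))
#     scores, indices = index.search(np.ones(5, dtype=np.float32))
#     print(indices[0])  # id of the nearest stored vector by inner product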
| 612 |
def remove_digit(num: int) -> int:
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 671 | 0 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"""EAGER""",
"""AOT_EAGER""",
"""INDUCTOR""",
"""NVFUSER""",
"""AOT_NVFUSER""",
"""AOT_CUDAGRAPHS""",
"""OFI""",
"""FX2TRT""",
"""ONNXRT""",
"""IPEX""",
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the subcommand usage line from help messages."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
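# Hypothetical usage sketch of the helpers above (interactive, so shown as a
# comment; the prompt text mirrors accelerate's own config questionnaire):
#
#     mixed_precision = _ask_options(
#         "Do you wish to use FP16 or BF16 (mixed precision)?",
#         ["no", "fp16", "bf16", "fp8"],
#         _convert_mixed_precision,
#     )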
| 610 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
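# A short demonstration of the circular invariant maintained above (sketch):
#
#     cll = CircularLinkedList()
#     for value in (1, 2, 3):
#         cll.insert_tail(value)
#     assert str(cll) == "1->2->3"
#     assert cll.tail.next is cll.head  # the tail always wraps back to the head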
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
__UpperCamelCase : Any = get_tests_dir('fixtures/dummy-config.json')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = 0
def _snake_case ( self : Tuple ):
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
def _snake_case ( self : str ):
'''simple docstring'''
__lowerCamelCase : List[str] = AutoConfig.from_pretrained("""bert-base-uncased""" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _snake_case ( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Tuple = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _snake_case ( self : Tuple ):
'''simple docstring'''
__lowerCamelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
__lowerCamelCase : Tuple = AutoConfig.for_model("""roberta""" )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
def _snake_case ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
__lowerCamelCase : List[str] = os.path.join(lowerCAmelCase__ , """fake-roberta""" )
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
with open(os.path.join(lowerCAmelCase__ , """config.json""" ) , """w""" ) as f:
f.write(json.dumps({} ) )
__lowerCamelCase : Optional[int] = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertEqual(type(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
        try:
            AutoConfig.register("custom", CustomConfig)
            # Wrong model type will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("model", CustomConfig)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoConfig.register("bert", BertConfig)
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase : Any = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
__lowerCamelCase : str = AutoConfig.from_pretrained(lowerCAmelCase__ )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _snake_case ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase__ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowerCamelCase : Any = AutoConfig.from_pretrained("""bert-base""" )
def _snake_case ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase__ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowerCamelCase : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="""aaaaaa""" )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
with self.assertRaisesRegex(
lowerCAmelCase__ , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
__lowerCamelCase : int = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
def _snake_case ( self : Dict ):
'''simple docstring'''
with self.assertRaises(lowerCAmelCase__ ):
__lowerCamelCase : str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(lowerCAmelCase__ ):
__lowerCamelCase : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase__ )
__lowerCamelCase : int = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCAmelCase__ )
__lowerCamelCase : str = AutoConfig.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
def _snake_case ( self : Optional[int] ):
'''simple docstring'''
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"

        try:
            AutoConfig.register("new-model", NewModelConfigLocal)
# If remote code is not set, the default is to use local
__lowerCamelCase : List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote code is disabled, we load the local one.
__lowerCamelCase : List[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
# If remote is enabled, we load from the Hub
__lowerCamelCase : Optional[int] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=lowerCAmelCase__ )
self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 519 |
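For reference, a minimal sketch (not taken from the source file) of the registration flow these tests exercise; the class name and model type below are illustrative.

# Illustrative sketch; MyCustomConfig and "my-custom" are hypothetical names.
from transformers import AutoConfig, PretrainedConfig

class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom"  # must match the key passed to AutoConfig.register()

AutoConfig.register("my-custom", MyCustomConfig)
config = AutoConfig.for_model("my-custom")
assert isinstance(config, MyCustomConfig)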
from collections import defaultdict
from math import ceil, sqrt
def solution( t_limit: int = 1_00_00_00 , n_limit: int = 10 ):
    count: defaultdict = defaultdict(int )
    for outer_width in range(3 , (t_limit // 4) + 2 ):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
        for hole_width in range(hole_width_lower_bound , outer_width - 1 , 2 ):
            count[outer_width * outer_width - hole_width * hole_width] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 671 | 0 |
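As a sanity check for the lamina-counting logic above, here is a slow brute-force version (not part of the source) that enumerates every outer/hole pair directly; it is only practical for small limits.

# Hypothetical brute-force validator; quadratic, so keep t_limit small.
from collections import defaultdict

def count_laminae_brute_force(t_limit: int = 1000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)
    for outer in range(3, t_limit):
        for hole in range(outer - 2, 0, -2):  # the hole keeps the outer square's parity
            tiles = outer * outer - hole * hole
            if tiles > t_limit:
                break  # tile count only grows as the hole shrinks
            count[tiles] += 1
    return sum(1 for n in count.values() if 1 <= n <= n_limit)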
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    '''simple docstring'''

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create( cls , common : CommonSchedulerState , init_noise_sigma : jnp.ndarray , timesteps : jnp.ndarray ):
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput ( FlaxSchedulerOutput ):
    '''simple docstring'''

    state: DDPMSchedulerState


class FlaxDDPMScheduler ( FlaxSchedulerMixin , ConfigMixin ):
    '''simple docstring'''

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state( self ):
        return True

    @register_to_config
    def __init__( self , num_train_timesteps : int = 1_000 , beta_start : float = 0.0001 , beta_end : float = 0.02 , beta_schedule : str = "linear" , trained_betas : Optional[jnp.ndarray] = None , variance_type : str = "fixed_small" , clip_sample : bool = True , prediction_type : str = "epsilon" , dtype : jnp.dtype = jnp.float32 , ):
        self.dtype = dtype
    def create_state( self , common : Optional[CommonSchedulerState] = None ):
        if common is None:
            common = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )
        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )

    def scale_model_input( self , state : DDPMSchedulerState , sample : jnp.ndarray , timestep : Optional[int] = None ):
        return sample

    def set_timesteps( self , state : DDPMSchedulerState , num_inference_steps : int , shape : Tuple = () ):
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )
    def _get_variance( self , state : DDPMSchedulerState , t , predicted_variance=None , variance_type=None ):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1E-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1E-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step( self , state : DDPMSchedulerState , model_output : jnp.ndarray , timestep : int , sample : jnp.ndarray , key : Optional[jax.random.KeyArray] = None , return_dict : bool = True , ):
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0 )
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                'or `v_prediction` for the FlaxDDPMScheduler.' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )
    def add_noise( self , state : DDPMSchedulerState , original_samples : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        return add_noise_common(state.common , original_samples , noise , timesteps )

    def get_velocity( self , state : DDPMSchedulerState , sample : jnp.ndarray , noise : jnp.ndarray , timesteps : jnp.ndarray , ):
        return get_velocity_common(state.common , sample , noise , timesteps )

    def __len__( self ):
        return self.config.num_train_timesteps
| 31 |
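A hedged usage sketch of this scheduler as published in diffusers; the import path, call signatures, shapes, and step count below follow the public API and are assumptions relative to this file.

# Sketch only; it drives the published diffusers FlaxDDPMScheduler rather than
# re-deriving anything from the code above. Shapes and step count are arbitrary.
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, 50)
sample = jnp.zeros((1, 3, 32, 32))
model_output = jnp.zeros_like(sample)  # stand-in for a UNet noise prediction
out = scheduler.step(state, model_output, int(state.timesteps[0]), sample,
                     key=jax.random.PRNGKey(0))
sample, state = out.prev_sample, out.state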
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase : str = {
"""configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
"""tokenization_xlm""": ["""XLMTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMForMultipleChoice""",
"""XLMForQuestionAnswering""",
"""XLMForQuestionAnsweringSimple""",
"""XLMForSequenceClassification""",
"""XLMForTokenClassification""",
"""XLMModel""",
"""XLMPreTrainedModel""",
"""XLMWithLMHeadModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
"""TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLMForMultipleChoice""",
"""TFXLMForQuestionAnsweringSimple""",
"""TFXLMForSequenceClassification""",
"""TFXLMForTokenClassification""",
"""TFXLMMainLayer""",
"""TFXLMModel""",
"""TFXLMPreTrainedModel""",
"""TFXLMWithLMHeadModel""",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 671 | 0 |
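The block above follows the library's standard lazy-import layout: import structure is declared up front, and heavy submodules load only when first touched. A generic sketch of the same deferred-import idea, independent of transformers internals (the class below is illustrative, not the real _LazyModule):

# Minimal deferred-import sketch: nothing is imported until an attribute is read.
import importlib

class LazyNamespace:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module  # attribute name -> module path

    def __getattr__(self, name):
        module = importlib.import_module(self._attr_to_module[name])
        return getattr(module, name)

ns = LazyNamespace({"dataclass": "dataclasses"})
decorator = ns.dataclass  # the "dataclasses" module is imported only here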
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__A : Optional[int] = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : List[Any] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__A : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 575 |
demo_graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
def bfs_shortest_path( graph , start , goal ):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0 )
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path )
                new_path.append(neighbour )
                queue.append(new_path )
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node )
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance( graph , start , target ):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue )
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0 )
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent )
                queue.append(adjacent )
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
| 671 | 0 |
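Storing whole paths on the queue copies O(V) items per enqueue; a common alternative (not in the source) keeps a parent map and rebuilds the path once at the end:

def bfs_shortest_path_parents(graph, start, goal):
    """Same answer as bfs_shortest_path, but via parent pointers."""
    parent = {start: None}
    queue = [start]
    while queue:
        node = queue.pop(0)
        if node == goal:
            path = []
            while node is not None:  # walk back from goal to start
                path.append(node)
                node = parent[node]
            return path[::-1]
        for neighbour in graph[node]:
            if neighbour not in parent:
                parent[neighbour] = node
                queue.append(neighbour)
    return []

# bfs_shortest_path_parents(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]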
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_a : Any = logging.get_logger(__name__)
class GLPNFeatureExtractor (GLPNImageProcessor ):
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 168 |
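The class above is the library's usual deprecation-shim pattern; a self-contained sketch of the same idea with hypothetical names:

import warnings

class NewProcessor:
    """Stand-in for the replacement class."""

class OldProcessor(NewProcessor):
    """Warn on construction, then behave exactly like the new class."""

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)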
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType ( Protocol ):
    """simple docstring"""

    def process( self , sample : float ) -> float:
        return 0.0
def get_bounds( fft_results , samplerate ):
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest


def show_frequency_response( filter_type , samplerate ):
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db , samplerate )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("Gain (dB)" )
    plt.plot(fft_db )
    plt.show()


def show_phase_response( filter_type , samplerate ):
    size = 5_12
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("Frequency (Hz)" )
    plt.xscale("log" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("Phase shift (Radians)" )
    plt.plot(np.unwrap(fft_out , -2 * pi ) )
    plt.show()
| 671 | 0 |
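To actually drive these plots you need an object satisfying the process protocol; a hypothetical first-order low-pass filter (not from the source) that does:

class SimpleLowPass:
    """One-pole IIR low-pass; alpha in (0, 1], smaller means stronger smoothing."""

    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        self.prev += self.alpha * (sample - self.prev)
        return self.prev

# show_frequency_response(SimpleLowPass(), 48_000)
# show_phase_response(SimpleLowPass(), 48_000)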
def decimal_to_fraction( decimal ):
    try:
        decimal = float(decimal )
    except ValueError:
        raise ValueError('Please enter a valid number' )
    fractional_part = decimal - int(decimal )
    if fractional_part == 0:
        return int(decimal ), 1
    else:
        number_of_frac_digits = len(str(decimal ).split('.' )[1] )
        numerator = int(decimal * (10**number_of_frac_digits) )
        denominator = 10**number_of_frac_digits
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator ), int(denominator )
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 417 |
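A quick cross-check (not in the source) against the standard library's Fraction, which reduces by the same GCD idea:

from fractions import Fraction

assert decimal_to_fraction(1.5) == (3, 2)
assert decimal_to_fraction("6.25") == (25, 4)
f = Fraction(25, 4)
assert (f.numerator, f.denominator) == decimal_to_fraction(6.25)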
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target: int = 2_00_00_00 ):
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(f'''{solution() = }''')
| 671 | 0 |
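The b_estimate line comes from solving the quadratic b(b+1)/2 = target / T_a for b; a tiny numeric check of that rearrangement (the values below are arbitrary):

from math import sqrt

T_a = 6        # triangle(3), an arbitrary small triangle number
target = 120
b = (-1 + sqrt(1 + 8 * target / T_a)) / 2
assert abs(b * (b + 1) / 2 - target / T_a) < 1e-9  # b(b+1)/2 == target / T_a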
"""simple docstring"""
import heapq
import sys
import numpy as np
__A = tuple[int, int]
class PriorityQueue:
    def __init__( self ):
        self.elements = []
        self.set = set()

    def minkey( self ):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf" )

    def empty( self ):
        return len(self.elements ) == 0

    def put( self , item , priority ):
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item) )
            self.set.add(item )
        else:
            # update
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pri, x) )
                (pri, x) = heapq.heappop(self.elements )
            temp.append((priority, item) )
            for pro, xxx in temp:
                heapq.heappush(self.elements , (pro, xxx) )

    def remove_element( self , item ):
        if item in self.set:
            self.set.remove(item )
            temp = []
            (pro, x) = heapq.heappop(self.elements )
            while x != item:
                temp.append((pro, x) )
                (pro, x) = heapq.heappop(self.elements )
            for prito, yyy in temp:
                heapq.heappush(self.elements , (prito, yyy) )

    def top_show( self ):
        return self.elements[0][1]

    def get( self ):
        (priority, item) = heapq.heappop(self.elements )
        self.set.remove(item )
        return (priority, item)
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase : List[str] = np.array(_UpperCAmelCase )
__lowerCamelCase : Any = np.array(_UpperCAmelCase )
return np.linalg.norm(a - b )
def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: Dict ) -> Tuple:
'''simple docstring'''
return consistent_heuristic(_UpperCAmelCase , _UpperCAmelCase ) // t
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: Dict ) -> str:
'''simple docstring'''
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def lowercase_ ( _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Optional[int] , _lowerCamelCase: List[Any] , _lowerCamelCase: Tuple ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = g_function[start] + Wa * heuristics[i](_UpperCAmelCase , _UpperCAmelCase )
return ans
def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: List[str] , _lowerCamelCase: str ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = np.chararray((n, n) )
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
__lowerCamelCase : int = "*"
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (j, (n - 1) - i) in blocks:
__lowerCamelCase : str = "#"
__lowerCamelCase : Optional[int] = "-"
__lowerCamelCase : Dict = back_pointer[goal]
while x != start:
(__lowerCamelCase) : str = x
# print(x)
__lowerCamelCase : Dict = "-"
__lowerCamelCase : List[str] = back_pointer[x]
__lowerCamelCase : Union[str, Any] = "-"
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=" " )
print("<-- End position" , end=" " )
else:
print(grid[i][j] , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
print("PATH TAKEN BY THE ALGORITHM IS:-" )
__lowerCamelCase : Any = back_pointer[goal]
while x != start:
print(_UpperCAmelCase , end=" " )
__lowerCamelCase : List[Any] = back_pointer[x]
print(_UpperCAmelCase )
sys.exit()
def lowercase_ ( _lowerCamelCase: List[str] ) -> Union[str, Any]:
'''simple docstring'''
if p[0] < 0 or p[0] > n - 1:
return False
if p[1] < 0 or p[1] > n - 1:
return False
return True
def lowercase_ ( _lowerCamelCase: List[str] , _lowerCamelCase: Optional[Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Dict , _lowerCamelCase: str , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Any , ) -> Optional[Any]:
'''simple docstring'''
for itera in range(_UpperCAmelCase ):
open_list[itera].remove_element(_UpperCAmelCase )
# print("s", s)
# print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
for neighbours in [left, right, up, down]:
if neighbours not in blocks:
if valid(_UpperCAmelCase ) and neighbours not in visited:
# print("neighbour", neighbours)
visited.add(_UpperCAmelCase )
__lowerCamelCase : int = -1
__lowerCamelCase : Union[str, Any] = float("inf" )
if valid(_UpperCAmelCase ) and g_function[neighbours] > g_function[s] + 1:
__lowerCamelCase : Tuple = g_function[s] + 1
__lowerCamelCase : str = s
if neighbours not in close_list_anchor:
open_list[0].put(_UpperCAmelCase , key(_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ) )
if neighbours not in close_list_inad:
for var in range(1 , _UpperCAmelCase ):
if key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) <= Wa * key(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase ):
open_list[j].put(
_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
def lowercase_ ( ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : Dict = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
__A = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a}
__A = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
__A = make_common_ground()
__A = blocks_blk
# hyper parameters
__A = 1
__A = 1
__A = 20
__A = 3 # one consistent and two other inconsistent
# start and end destination
__A = (0, 0)
__A = (n - 1, n - 1)
__A = 1
def lowercase_ ( _lowerCamelCase: Any , _lowerCamelCase: Union[str, Any] , _lowerCamelCase: Tuple ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : int = {start: 0, goal: float("inf" )}
__lowerCamelCase : List[str] = {start: -1, goal: -1}
__lowerCamelCase : List[str] = []
__lowerCamelCase : str = set()
for i in range(_UpperCAmelCase ):
open_list.append(PriorityQueue() )
open_list[i].put(_UpperCAmelCase , key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) )
__lowerCamelCase : list[int] = []
__lowerCamelCase : list[int] = []
while open_list[0].minkey() < float("inf" ):
for i in range(1 , _UpperCAmelCase ):
# print(open_list[0].minkey(), open_list[i].minkey())
if open_list[i].minkey() <= Wa * open_list[0].minkey():
global t
t += 1
if g_function[goal] <= open_list[i].minkey():
if g_function[goal] < float("inf" ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__lowerCamelCase : int = open_list[i].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_inad.append(_UpperCAmelCase )
else:
if g_function[goal] <= open_list[0].minkey():
if g_function[goal] < float("inf" ):
do_something(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
__lowerCamelCase : Any = open_list[0].top_show()
visited.add(_UpperCAmelCase )
expand_state(
_UpperCAmelCase , 0 , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
close_list_anchor.append(_UpperCAmelCase )
print("No path found to goal" )
print()
for i in range(n - 1 , -1 , -1 ):
for j in range(_UpperCAmelCase ):
if (j, i) in blocks:
print("#" , end=" " )
elif (j, i) in back_pointer:
if (j, i) == (n - 1, n - 1):
print("*" , end=" " )
else:
print("-" , end=" " )
else:
print("*" , end=" " )
if (j, i) == (n - 1, n - 1):
print("<-- End position" , end=" " )
print()
print("^" )
print("Start position" )
print()
print("# is an obstacle" )
print("- is the path taken by algorithm" )
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
| 646 |
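A small usage sketch (not in the source) of the PriorityQueue above; with min-key ordering the smallest priority is served first:

pq = PriorityQueue()
pq.put((0, 0), 5.0)
pq.put((1, 0), 2.0)
assert pq.minkey() == 2.0          # smallest key comes out first
assert pq.top_show() == (1, 0)
priority, item = pq.get()
assert (priority, item) == (2.0, (1, 0))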
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Optional[int] = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Union[str, Any] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 671 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = tempfile.mkdtemp()
lowerCAmelCase__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCAmelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowerCAmelCase__ = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def __snake_case ( self : str , **SCREAMING_SNAKE_CASE_ : List[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __snake_case ( self : Any , **SCREAMING_SNAKE_CASE_ : Dict ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __snake_case ( self : Dict , **SCREAMING_SNAKE_CASE_ : Optional[int] ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def __snake_case ( self : List[str] ):
shutil.rmtree(self.tmpdirname )
def __snake_case ( self : List[Any] ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __snake_case ( self : Dict ):
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = self.get_rust_tokenizer()
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=False )
lowerCAmelCase__ = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def __snake_case ( self : List[Any] ):
lowerCAmelCase__ = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowerCAmelCase__ = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
lowerCAmelCase__ = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = image_processor(lowerCAmelCase__ , return_tensors='''np''' )
lowerCAmelCase__ = processor(images=lowerCAmelCase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __snake_case ( self : int ):
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = processor(text=lowerCAmelCase__ )
lowerCAmelCase__ = tokenizer(lowerCAmelCase__ , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def __snake_case ( self : int ):
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
lowerCAmelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase__ = processor.batch_decode(lowerCAmelCase__ )
lowerCAmelCase__ = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __snake_case ( self : Optional[int] ):
lowerCAmelCase__ = self.get_image_processor()
lowerCAmelCase__ = self.get_tokenizer()
lowerCAmelCase__ = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
lowerCAmelCase__ = "lower newer"
lowerCAmelCase__ = self.prepare_image_inputs()
lowerCAmelCase__ = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 668 |
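A hedged end-to-end sketch of the processor these tests cover; the checkpoint name is an assumption about the published ALIGN release, not something taken from this file.

import numpy as np
from PIL import Image
from transformers import AlignProcessor

# "kakaobrain/align-base" is assumed to be the published ALIGN checkpoint.
processor = AlignProcessor.from_pretrained("kakaobrain/align-base")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # attention_mask, input_ids, pixel_values, token_type_ids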
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowerCAmelCase : Optional[int] = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
lowerCAmelCase : str = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
lowerCAmelCase : List[str] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowerCAmelCase : int = f'''down_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : List[str] = f'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowerCAmelCase : Any = f'''down_blocks.{i}.attentions.{j}.'''
lowerCAmelCase : List[Any] = f'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowerCAmelCase : Any = f'''up_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : str = f'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowerCAmelCase : List[Any] = f'''up_blocks.{i}.attentions.{j}.'''
lowerCAmelCase : str = f'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowerCAmelCase : Any = f'''down_blocks.{i}.downsamplers.0.conv.'''
lowerCAmelCase : Tuple = f'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowerCAmelCase : Tuple = f'''up_blocks.{i}.upsamplers.0.'''
lowerCAmelCase : Tuple = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowerCAmelCase : Any = """mid_block.attentions.0."""
lowerCAmelCase : Dict = """middle_block.1."""
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowerCAmelCase : int = f'''mid_block.resnets.{j}.'''
lowerCAmelCase : Union[str, Any] = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def A_ ( _UpperCAmelCase ):
# buyer beware: this is a *brittle* function,
# and correct output requires that all of these pieces interact in
# the exact order in which I have arranged them.
SCREAMING_SNAKE_CASE_: Dict = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
SCREAMING_SNAKE_CASE_: Optional[int] = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: str = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
SCREAMING_SNAKE_CASE_: Optional[Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Optional[int] = v
SCREAMING_SNAKE_CASE_: Optional[Any] = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowerCAmelCase : Union[str, Any] = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowerCAmelCase : Union[str, Any] = f'''encoder.down_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : Optional[Any] = f'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowerCAmelCase : Dict = f'''down_blocks.{i}.downsamplers.0.'''
lowerCAmelCase : List[str] = f'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowerCAmelCase : List[str] = f'''up_blocks.{i}.upsamplers.0.'''
lowerCAmelCase : int = f'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowerCAmelCase : Any = f'''decoder.up_blocks.{i}.resnets.{j}.'''
lowerCAmelCase : int = f'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowerCAmelCase : str = f'''mid_block.resnets.{i}.'''
lowerCAmelCase : Tuple = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowerCAmelCase : List[Any] = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def A_ ( _UpperCAmelCase ):
# convert HF linear weights to SD conv2d weights
return w.reshape(*w.shape , 1 , 1 )
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Optional[Any] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
SCREAMING_SNAKE_CASE_: Union[str, Any] = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: Union[str, Any] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
SCREAMING_SNAKE_CASE_: Any = v.replace(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE_: List[str] = v
SCREAMING_SNAKE_CASE_: Tuple = {v: vae_state_dict[k] for k, v in mapping.items()}
SCREAMING_SNAKE_CASE_: Union[str, Any] = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k:
print(f"Reshaping {k} for SD format" )
SCREAMING_SNAKE_CASE_: List[str] = reshape_weight_for_sd(_UpperCAmelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowerCAmelCase : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
("""resblocks.""", """text_model.encoder.layers."""),
("""ln_1""", """layer_norm1"""),
("""ln_2""", """layer_norm2"""),
(""".c_fc.""", """.fc1."""),
(""".c_proj.""", """.fc2."""),
(""".attn""", """.self_attn"""),
("""ln_final.""", """transformer.text_model.final_layer_norm."""),
("""token_embedding.weight""", """transformer.text_model.embeddings.token_embedding.weight"""),
("""positional_embedding""", """transformer.text_model.embeddings.position_embedding.weight"""),
]
lowerCAmelCase : Optional[Any] = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowerCAmelCase : Optional[int] = re.compile("""|""".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowerCAmelCase : str = {"""q""": 0, """k""": 1, """v""": 2}
def A_ ( _UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: str = {}
SCREAMING_SNAKE_CASE_: List[str] = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
SCREAMING_SNAKE_CASE_: str = k[: -len(".q_proj.weight" )]
SCREAMING_SNAKE_CASE_: Dict = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
SCREAMING_SNAKE_CASE_: Tuple = [None, None, None]
SCREAMING_SNAKE_CASE_: Union[str, Any] = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
SCREAMING_SNAKE_CASE_: Union[str, Any] = k[: -len(".q_proj.bias" )]
SCREAMING_SNAKE_CASE_: Any = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
SCREAMING_SNAKE_CASE_: List[Any] = [None, None, None]
SCREAMING_SNAKE_CASE_: List[str] = v
continue
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k )
        new_state_dict[relabelled_key] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
        relabelled_key = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , k_pre )
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors )
return new_state_dict
def A_ ( _UpperCAmelCase ):
return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase : int = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
lowerCAmelCase : Optional[Any] = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase : int = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase : List[str] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase : Optional[int] = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase : Optional[int] = load_file(unet_path, device="""cpu""")
else:
lowerCAmelCase : Union[str, Any] = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
lowerCAmelCase : Optional[Any] = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
lowerCAmelCase : str = load_file(vae_path, device="""cpu""")
else:
lowerCAmelCase : List[Any] = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
lowerCAmelCase : Optional[Any] = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
lowerCAmelCase : List[Any] = load_file(text_enc_path, device="""cpu""")
else:
lowerCAmelCase : List[Any] = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
lowerCAmelCase : Optional[Any] = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
lowerCAmelCase : int = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase : Optional[int] = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase : Union[str, Any] = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase : Optional[int] = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase : Any = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase : Any = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase : str = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase : Dict = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase : Any = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase : Optional[Any] = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase : Union[str, Any] = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase : str = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase : int = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 671 | 0 |
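The q/k/v capture-and-concatenate step above re-fuses three per-projection tensors into one attention weight; a toy illustration of the shape arithmetic (sizes below are arbitrary):

import torch

q, k, v = (torch.randn(4, 4) for _ in range(3))
in_proj = torch.cat([q, k, v])        # rows stacked in q, k, v order -> shape (12, 4)
assert in_proj.shape == (12, 4)
assert torch.equal(in_proj[4:8], k)   # each projection is recoverable by slicing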
import math
def insertion_sort( array , start = 0 , end = 0 ):
    end = end or len(array )
    for i in range(start , end ):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify( array , index , heap_size ):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array , largest , heap_size )


def heap_sort( array ):
    n = len(array )
    for i in range(n // 2 , -1 , -1 ):
        heapify(array , i , n )
    for i in range(n - 1 , 0 , -1 ):
        array[0], array[i] = array[i], array[0]
        heapify(array , 0 , i )
    return array


def median_of_3( array , first_index , middle_index , last_index ):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition( array , low , high , pivot ):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort( array ):
    if len(array ) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array ) ) )
    size_threshold = 1_6
    return intro_sort(array , 0 , len(array ) , size_threshold , max_depth )


def intro_sort( array , start , end , size_threshold , max_depth ):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array )
        max_depth -= 1
        pivot = median_of_3(array , start , start + ((end - start) // 2) + 1 , end - 1 )
        p = partition(array , start , end , pivot )
        intro_sort(array , p , end , size_threshold , max_depth )
        end = p
    return insertion_sort(array , start , end )
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : Dict = input('Enter numbers separated by a comma : ').strip()
_lowerCamelCase : Tuple = [float(item) for item in user_input.split(',')]
print(sort(unsorted))
| 121 |
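Introsort starts with quicksort, falls back to heapsort when recursion gets too deep, and finishes small ranges with insertion sort; a quick sanity check (not in the source) of the repaired sort above:

import random

data = random.sample(range(1000), 100)
assert sort(list(data)) == sorted(data)
assert sort([]) == []
assert sort([3.1, -2.5, 0.0]) == [-2.5, 0.0, 3.1]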
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : int = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
'''num_attention_heads''': '''num_encoder_attention_heads''',
}
    def __init__( self , activation_dropout : Optional[float] = 0.1 , activation_function : Optional[Union[str, Callable]] = "gelu" , vocab_size : Optional[int] = 3_0522 , hidden_size : Optional[int] = 1024 , encoder_ffn_dim : Optional[int] = 4096 , num_encoder_layers : Optional[int] = 12 , num_encoder_attention_heads : Optional[int] = 16 , decoder_ffn_dim : Optional[int] = 4096 , num_decoder_layers : Optional[int] = 12 , num_decoder_attention_heads : Optional[int] = 16 , attention_dropout : Optional[float] = 0.1 , dropout : Optional[float] = 0.1 , max_position_embeddings : Optional[int] = 512 , init_std : Optional[float] = 0.02 , is_encoder_decoder : Optional[bool] = True , add_cross_attention : Optional[bool] = True , decoder_start_token_id : Optional[int] = 0 , ngram : Optional[int] = 2 , num_buckets : Optional[int] = 32 , relative_max_distance : Optional[int] = 128 , disable_ngram_loss : Optional[bool] = False , eps : Optional[float] = 0.0 , use_cache : Optional[bool] = True , pad_token_id : Optional[int] = 0 , bos_token_id : Optional[int] = 1 , eos_token_id : Optional[int] = 2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Any):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`.")
| 671 | 0 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
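# Hedged usage sketch: the conversion can also be driven programmatically by
# importing convert_tf_checkpoint_to_pytorch; all paths below are placeholders.
#
#     convert_tf_checkpoint_to_pytorch(
#         tf_checkpoint_path="/path/to/bert_model.ckpt",
#         bert_config_file="/path/to/bert_config.json",
#         pytorch_dump_path="/path/to/pytorch_model.bin",
#     )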
| 89 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    r"""Resizes images, optionally normalizes them to [-1, 1], and optionally color-quantizes pixels into cluster indices."""

    model_input_names = ["pixel_values"]

    def __init__(self, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_normalize: bool = True, do_color_quantize: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        # Rescale pixels to [0, 2], then shift to [-1, 1].
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_normalize: bool = None, do_color_quantize: Optional[bool] = None, clusters: Optional[Union[List[List[int]], np.ndarray]] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
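# Hedged usage sketch: one RGB image through the processor with a tiny
# hypothetical 3-colour palette in the [-1, 1] space the normalizer maps to.
# Real ImageGPT checkpoints ship 512 clusters; everything below is made up
# purely for illustration.
#
#     import numpy as np
#
#     clusters = [[-1.0, -1.0, -1.0], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]  # hypothetical palette
#     processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 4, "width": 4})
#     image = np.random.randint(0, 256, (8, 8, 3), dtype=np.uint8)
#     encoding = processor.preprocess(image, return_tensors="np")
#     # encoding["input_ids"].shape == (1, 16)  -> one flattened 4x4 map of palette indices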
| 671 | 0 |
def matching_min_vertex_cover(graph: dict) -> set:
    """APX algorithm: build a vertex cover greedily from a maximal matching."""
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph)

    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return a set of (from_node, to_node) couples representing all edges."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 612 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRContextEncoder tokenizer (backed by the BERT tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""Construct a DPRQuestionEncoder tokenizer (backed by the BERT tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length,
                return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""Construct a DPRReader tokenizer (backed by the BERT tokenizer)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
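# Hedged usage sketch for the reader tokenizer above; `model` stands in for a
# DPRReader checkpoint and is not loaded here.
#
#     tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#     encoded_inputs = tokenizer(
#         questions=["What is love?"],
#         titles=["Haddaway"],
#         texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#         return_tensors="pt",
#     )
#     # outputs = model(**encoded_inputs)
#     # predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)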
| 671 | 0 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("""0.8.3"""):
raise Exception("""requires gluonnlp == 0.8.3""")
if version.parse(mx.__version__) != version.parse("""1.5.0"""):
raise Exception("""requires mxnet == 1.5.0""")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"

def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
"""simple docstring"""
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
lowerCAmelCase_ : List[str] = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=_UpperCAmelCase , output_all_encodings=_UpperCAmelCase , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , _UpperCAmelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
lowerCAmelCase_ : List[str] = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
lowerCAmelCase_ : str = os.path.join(get_home_dir() , "models" )
lowerCAmelCase_ : List[Any] = _load_vocab(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls=_UpperCAmelCase )
lowerCAmelCase_ : List[Any] = nlp.model.BERTModel(
_UpperCAmelCase , len(_UpperCAmelCase ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=_UpperCAmelCase , use_token_type_embed=_UpperCAmelCase , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=_UpperCAmelCase , use_decoder=_UpperCAmelCase , )
original_bort.load_parameters(_UpperCAmelCase , cast_dtype=_UpperCAmelCase , ignore_extra=_UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
lowerCAmelCase_ : Any = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(_UpperCAmelCase ),
}
lowerCAmelCase_ : List[Any] = BertConfig.from_dict(_UpperCAmelCase )
lowerCAmelCase_ : Tuple = BertForMaskedLM(_UpperCAmelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__UpperCamelCase ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__UpperCamelCase , __UpperCamelCase ):
lowerCAmelCase_ : str = hf_param.shape
lowerCAmelCase_ : int = to_torch(params[gluon_param] )
lowerCAmelCase_ : Dict = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
lowerCAmelCase_ : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
lowerCAmelCase_ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
lowerCAmelCase_ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
lowerCAmelCase_ : Optional[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
lowerCAmelCase_ : Any = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
lowerCAmelCase_ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
lowerCAmelCase_ : BertSelfAttention = layer.attention.self
lowerCAmelCase_ : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
lowerCAmelCase_ : Tuple = check_and_map_params(
self_attn.key.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
lowerCAmelCase_ : List[str] = check_and_map_params(
self_attn.query.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
lowerCAmelCase_ : List[str] = check_and_map_params(
self_attn.query.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
lowerCAmelCase_ : Any = check_and_map_params(
self_attn.value.bias.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
lowerCAmelCase_ : List[Any] = check_and_map_params(
self_attn.value.weight.data , f'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
lowerCAmelCase_ : BertSelfOutput = layer.attention.output
lowerCAmelCase_ : Optional[Any] = check_and_map_params(
self_output.dense.bias , f'''encoder.transformer_cells.{i}.proj.bias''' )
lowerCAmelCase_ : List[str] = check_and_map_params(
self_output.dense.weight , f'''encoder.transformer_cells.{i}.proj.weight''' )
lowerCAmelCase_ : Any = check_and_map_params(
self_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.layer_norm.beta''' )
lowerCAmelCase_ : int = check_and_map_params(
self_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
lowerCAmelCase_ : BertIntermediate = layer.intermediate
lowerCAmelCase_ : Union[str, Any] = check_and_map_params(
intermediate.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
lowerCAmelCase_ : Tuple = check_and_map_params(
intermediate.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
lowerCAmelCase_ : BertOutput = layer.output
lowerCAmelCase_ : Optional[int] = check_and_map_params(
bert_output.dense.bias , f'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
lowerCAmelCase_ : int = check_and_map_params(
bert_output.dense.weight , f'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
lowerCAmelCase_ : str = check_and_map_params(
bert_output.LayerNorm.bias , f'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
lowerCAmelCase_ : int = check_and_map_params(
bert_output.LayerNorm.weight , f'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
lowerCAmelCase_ : Tuple = RobertaTokenizer.from_pretrained("roberta-base" )
lowerCAmelCase_ : Optional[int] = tokenizer.encode_plus(_UpperCAmelCase )["input_ids"]
# Get gluon output
lowerCAmelCase_ : Dict = mx.nd.array([input_ids] )
lowerCAmelCase_ : Optional[Any] = original_bort(inputs=_UpperCAmelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(_UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = BertModel.from_pretrained(_UpperCAmelCase )
hf_bort_model.eval()
lowerCAmelCase_ : Union[str, Any] = tokenizer.encode_plus(_UpperCAmelCase , return_tensors="pt" )
lowerCAmelCase_ : Dict = hf_bort_model(**_UpperCAmelCase )[0]
lowerCAmelCase_ : List[Any] = output_gluon[0].asnumpy()
lowerCAmelCase_ : List[Any] = output_hf[0].detach().numpy()
lowerCAmelCase_ : Optional[Any] = np.max(np.abs(hf_layer - gluon_layer ) ).item()
lowerCAmelCase_ : List[str] = np.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , _UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 610 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 671 | 0 |
def solution(limit: int = 28123) -> int:
    """
    Sum of all positive integers <= limit that cannot be written as the sum
    of two abundant numbers (Project Euler problem 23).
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
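# Hedged note: sum_divs[n] holds the sum of proper divisors of n, built with a
# sieve over divisor pairs; n is abundant when that sum exceeds n. A tiny
# cross-check against the definition (illustrative only):
#
#     >>> def proper_divisor_sum(n):
#     ...     return sum(d for d in range(1, n) if n % d == 0)
#     >>> proper_divisor_sum(12)  # 1+2+3+4+6; 12 is the smallest abundant number
#     16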
| 519 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    error_line = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            error_line = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[error_line] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        # Set by post(); post_reply() checks it before replying in-thread.
        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
    @property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
    @property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
F" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += F"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"The following examples had failures:\n\n\n{report}\n",
},
}
    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
{
"type": "section",
"text": {
"type": "plain_text",
"text": "There was an issue running the tests.",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(lowerCAmelCase__)}))
client.chat_postMessage(
channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"] , text="There was an issue running the tests." , blocks=lowerCAmelCase__ , )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase : Tuple = get_job_links()
lowerCAmelCase : Optional[Any] = retrieve_available_artifacts()
lowerCAmelCase : Any = collections.OrderedDict(
[
("""*.py""", """API Examples"""),
("""*.md""", """MD Examples"""),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase : int = {
v: {
"""failed""": [],
"""failures""": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase : Optional[int] = github_actions_job_links.get("""run_doctests""")
lowerCAmelCase : List[Any] = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0]
lowerCAmelCase : Any = retrieve_artifact(artifact_path["""name"""])
if "stats" in artifact:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = handle_test_results(artifact["""stats"""])
lowerCAmelCase : List[str] = failed
lowerCAmelCase : Any = success
lowerCAmelCase : Dict = time_spent[1:-1] + """, """
lowerCAmelCase : str = extract_first_line_failure(artifact["""failures_short"""])
for line in artifact["summary_short"].split("""\n"""):
if re.search("""FAILED""", line):
lowerCAmelCase : Tuple = line.replace("""FAILED """, """""")
lowerCAmelCase : str = line.split()[0].replace("""\n""", """""")
if "::" in line:
lowerCAmelCase , lowerCAmelCase : Optional[int] = line.split("""::""")
else:
lowerCAmelCase , lowerCAmelCase : str = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase : str = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase : str = all_failures[test] if test in all_failures else """N/A"""
lowerCAmelCase : Any = failure
break
lowerCAmelCase : Union[str, Any] = Message("""🤗 Results of the doc tests.""", doc_test_results)
message.post()
message.post_reply()
| 671 | 0 |
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list for 0-1 BFS (every edge weight is 0 or 1)."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        # 0-1 BFS: a deque replaces Dijkstra's priority queue; weight-0 edges
        # go to the front of the deque, weight-1 edges to the back.
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
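# Hedged usage sketch (the graph values are illustrative only):
#
#     g = AdjacencyList(4)
#     g.add_edge(0, 1, 0)   # free edge
#     g.add_edge(1, 2, 1)
#     g.add_edge(0, 3, 1)
#     g.add_edge(3, 2, 0)
#     g.get_shortest_path(0, 2)  # -> 1, via either 0-1-2 or 0-3-2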
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 31 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _UpperCAmelCase ) == "1":
SCREAMING_SNAKE_CASE_: Tuple = 2
# New Code #
SCREAMING_SNAKE_CASE_: List[str] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE_: int = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=_UpperCAmelCase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: Tuple = config["lr"]
SCREAMING_SNAKE_CASE_: List[str] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE_: List[str] = int(config["seed"] )
SCREAMING_SNAKE_CASE_: Optional[int] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE_: str = evaluate.load("glue" , "mrpc" )
set_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = get_dataloaders(_UpperCAmelCase , _UpperCAmelCase )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=True )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader) * num_epochs) , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model ):
                output = model(**batch )
                loss = output.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:" , eval_metric )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" , type=str , default=None , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose "
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 "
        "and an Nvidia Ampere GPU." , )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps" , type=int , default=1 , help="The number of minibatches to be run before gradients are accumulated." , )
    parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config , args )
if __name__ == "__main__":
main()
| 671 | 0 |
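The script above delegates gradient accumulation to `accelerator.accumulate`. As a rough illustration of what that context manager automates, here is a minimal plain-PyTorch sketch; the names `model`, `optimizer`, and `dataloader` are generic placeholders, not objects from the script:

import torch

def train_with_accumulation(model, optimizer, dataloader, accumulation_steps=4):
    # Scale each mini-batch loss so the summed gradients over
    # `accumulation_steps` batches match one large-batch update.
    model.train()
    optimizer.zero_grad()
    for step, (inputs, labels) in enumerate(dataloader):
        loss = torch.nn.functional.cross_entropy(model(inputs), labels)
        (loss / accumulation_steps).backward()
        if (step + 1) % accumulation_steps == 0:
            optimizer.step()
            optimizer.zero_grad()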
"""simple docstring"""
from manim import *
class Stage1(Scene):
    """simple docstring"""
    def construct(self):
__lowercase : Optional[Any] = Rectangle(height=0.5 , width=0.5 )
__lowercase : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__lowercase : Any = [mem.copy() for i in range(6 )]
__lowercase : Any = [mem.copy() for i in range(6 )]
__lowercase : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase : Dict = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase : str = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase : Dict = Text("CPU" , font_size=2_4 )
__lowercase : Dict = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
__lowercase : Optional[Any] = [mem.copy() for i in range(1 )]
__lowercase : Dict = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase : Union[str, Any] = Text("GPU" , font_size=2_4 )
__lowercase : Dict = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.align_to(lowerCAmelCase__ , lowerCAmelCase__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(lowerCAmelCase__ )
__lowercase : Tuple = [mem.copy() for i in range(6 )]
__lowercase : List[str] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__lowercase : List[str] = Text("Model" , font_size=2_4 )
__lowercase : str = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) , )
__lowercase : Dict = MarkupText(
f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , )
__lowercase : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__lowercase : Tuple = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=2.5 ) , Write(lowerCAmelCase__ ) , Write(lowerCAmelCase__ ) )
self.add(lowerCAmelCase__ )
__lowercase : Tuple = []
__lowercase : Union[str, Any] = []
__lowercase : List[str] = []
for i, rect in enumerate(lowerCAmelCase__ ):
__lowercase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
cpu_target.move_to(lowerCAmelCase__ )
cpu_target.generate_target()
__lowercase : Optional[int] = 0.4_6 / 4
__lowercase : str = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCAmelCase__ , buff=0.0 )
cpu_targs.append(lowerCAmelCase__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCAmelCase__ ) )
second_animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(*lowerCAmelCase__ )
self.wait()
| 575 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137
def haversine_distance(lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1 ) * cos(phi_2 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
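A quick, approximate sanity check for the ellipsoid-corrected haversine above: the distance between San Francisco and New York should come out near 4,100 km. The coordinates below are rounded and assumed for illustration:

# Approximate (latitude, longitude) in degrees.
SAN_FRANCISCO = (37.774856, -122.424227)
NEW_YORK = (40.713019, -74.012647)
print(f"{haversine_distance(*SAN_FRANCISCO, *NEW_YORK) / 1000:.1f} km")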
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000 ) -> int:
    """simple docstring"""
    frequencies: defaultdict = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 168 |
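The solver above leans on Euclid's formula; a short self-contained sketch of the triple it generates for a coprime pair (m, n) of opposite parity:

def euclid_triple(m: int, n: int) -> tuple:
    # For m > n > 0, coprime and of opposite parity, this is a primitive
    # Pythagorean triple with perimeter 2 * m * (m + n).
    return (m * m - n * n, 2 * m * n, m * m + n * n)

assert euclid_triple(2, 1) == (3, 4, 5)  # perimeter 12; multiples 24, 36, ... follow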
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path , bert_config_file , pytorch_dump_path ):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file )
    print(f"Building PyTorch model from configuration: {config}" )
    model = BertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 671 | 0 |
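When no TensorFlow checkpoint is at hand, the save step of the converter can still be exercised with a randomly initialized model; the small config values here are illustrative, not taken from any released checkpoint:

import torch
from transformers import BertConfig, BertForPreTraining

config = BertConfig(hidden_size=128, num_hidden_layers=2, num_attention_heads=2, intermediate_size=256)
model = BertForPreTraining(config)
torch.save(model.state_dict(), "bert_random_init.bin")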
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object ):
def __init__( self : Any , snake_case : int , snake_case : Optional[Any]=1_3 , snake_case : Union[str, Any]=7 , snake_case : Optional[int]=True , snake_case : Any=True , snake_case : Optional[int]=False , snake_case : Any=True , snake_case : Dict=9_9 , snake_case : Optional[int]=3_2 , snake_case : Any=5 , snake_case : Optional[Any]=4 , snake_case : Optional[Any]=6_4 , snake_case : Optional[int]="gelu" , snake_case : Dict=0.1 , snake_case : Tuple=0.1 , snake_case : List[str]=5_1_2 , snake_case : Optional[Any]=1_6 , snake_case : Dict=2 , snake_case : str=0.02 , snake_case : Any=3 , snake_case : List[str]=4 , snake_case : str=None , snake_case : int=2 , snake_case : int=2 , snake_case : Tuple=2 , snake_case : int=2 , snake_case : Any=4 , snake_case : Optional[int]=1 , ) -> int:
"""simple docstring"""
UpperCamelCase_ : Dict = parent
UpperCamelCase_ : Any = batch_size
UpperCamelCase_ : List[str] = seq_length
UpperCamelCase_ : List[Any] = is_training
UpperCamelCase_ : Union[str, Any] = use_input_mask
UpperCamelCase_ : str = use_token_type_ids
UpperCamelCase_ : List[Any] = use_labels
UpperCamelCase_ : int = vocab_size
UpperCamelCase_ : List[str] = hidden_size
UpperCamelCase_ : List[str] = num_hidden_layers
UpperCamelCase_ : Any = num_attention_heads
UpperCamelCase_ : Optional[Any] = intermediate_size
UpperCamelCase_ : str = hidden_act
UpperCamelCase_ : str = hidden_dropout_prob
UpperCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase_ : Any = max_position_embeddings
UpperCamelCase_ : Dict = type_vocab_size
UpperCamelCase_ : Dict = type_sequence_label_size
UpperCamelCase_ : str = initializer_range
UpperCamelCase_ : Dict = num_labels
UpperCamelCase_ : Optional[int] = num_choices
UpperCamelCase_ : List[Any] = scope
UpperCamelCase_ : List[str] = q_groups
UpperCamelCase_ : str = k_groups
UpperCamelCase_ : Tuple = v_groups
UpperCamelCase_ : Optional[int] = post_attention_groups
UpperCamelCase_ : Optional[int] = intermediate_groups
UpperCamelCase_ : List[Any] = output_groups
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Optional[Any] = None
if self.use_input_mask:
UpperCamelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ : Tuple = None
UpperCamelCase_ : str = None
UpperCamelCase_ : Optional[Any] = None
if self.use_labels:
UpperCamelCase_ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase_ : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : List[str] , snake_case : Tuple , snake_case : Tuple , snake_case : Dict , snake_case : Union[str, Any] , snake_case : Optional[int] ) -> int:
"""simple docstring"""
UpperCamelCase_ : Any = SqueezeBertModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase_ : Dict = model(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCamelCase_ : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Optional[int] , snake_case : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : int , snake_case : str ) -> List[Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = SqueezeBertForMaskedLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase_ : List[Any] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : str , snake_case : Tuple , snake_case : List[str] , snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : int = SqueezeBertForQuestionAnswering(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase_ : Any = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[int] , snake_case : Dict , snake_case : Tuple , snake_case : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self.num_labels
UpperCamelCase_ : Optional[int] = SqueezeBertForSequenceClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase_ : Dict = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Tuple , snake_case : Dict , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : str , snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.num_labels
UpperCamelCase_ : Optional[int] = SqueezeBertForTokenClassification(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase_ : List[str] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : int ) -> int:
"""simple docstring"""
UpperCamelCase_ : int = self.num_choices
UpperCamelCase_ : Optional[Any] = SqueezeBertForMultipleChoice(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase_ : Optional[int] = model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = True
lowercase = False
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Dict = SqueezeBertModelTester(self )
UpperCamelCase_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , dim=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Dict:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCAmelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> str:
"""simple docstring"""
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : List[Any] = SqueezeBertModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
UpperCamelCase_ : Optional[Any] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
UpperCamelCase_ : str = model(lowerCAmelCase__ )[0]
UpperCamelCase_ : Tuple = torch.Size((1, 3) )
self.assertEqual(output.shape , lowerCAmelCase__ )
UpperCamelCase_ : Union[str, Any] = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-4 ) )
| 417 |
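The tester above builds its inputs with `ids_tensor` and `random_attention_mask`. A plain-torch stand-in for those helpers, under the assumption that uniform random ids and a mask whose last position is always attended are sufficient for shape tests:

import torch

def random_ids(batch_size, seq_length, vocab_size):
    # Uniform random token ids in [0, vocab_size).
    return torch.randint(0, vocab_size, (batch_size, seq_length), dtype=torch.long)

def random_mask(batch_size, seq_length):
    # Random 0/1 mask; force the last position to 1 so no row is fully masked.
    mask = torch.randint(0, 2, (batch_size, seq_length), dtype=torch.long)
    mask[:, -1] = 1
    return mask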
import math
def is_prime(number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1 ) -> int:
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 671 | 0 |
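The inner `range` in `solution` walks the three non-square corners added when the spiral side grows from j to j + 2 (Project Euler 58). A small check of that enumeration:

def corner_values(j: int) -> list:
    # Corners j*j + k*(j + 1) for k = 1, 2, 3; the fourth corner (j + 2)**2
    # is a perfect square and never prime for j > 1.
    return list(range(j * j + j + 1, (j + 2) * (j + 2), j + 1))

assert corner_values(3) == [13, 17, 21]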
"""simple docstring"""
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"""text-classification""",
"""language-modeling""",
"""summarization""",
"""token-classification""",
"""question-answering""",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> str:
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
def get_results(output_dir: str , split: str = "eval" ) -> dict:
    '''simple docstring'''
    path = os.path.join(output_dir , F"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path , "r" ) as f:
            return json.load(f )
    raise ValueError(F"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus ):
    def test_run_glue(self ):
__lowerCamelCase : str = self.get_auto_remove_tmp_dir()
__lowerCamelCase : Optional[int] = F"""\n run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --eval_steps=2\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n """.split()
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
run_flax_glue.main()
__lowerCamelCase : List[Any] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
@slow
    def test_run_clm(self ):
__lowerCamelCase : Optional[int] = self.get_auto_remove_tmp_dir()
__lowerCamelCase : int = F"""\n run_clm_flax.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --block_size 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
run_clm_flax.main()
__lowerCamelCase : int = get_results(lowerCAmelCase__ )
self.assertLess(result["eval_perplexity"] , 100 )
@slow
    def test_run_summarization(self ):
__lowerCamelCase : int = self.get_auto_remove_tmp_dir()
__lowerCamelCase : Any = F"""\n run_summarization.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --test_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=8\n --do_train\n --do_eval\n --do_predict\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --predict_with_generate\n """.split()
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
run_summarization_flax.main()
__lowerCamelCase : Optional[int] = get_results(lowerCAmelCase__ , split="test" )
self.assertGreaterEqual(result["test_rouge1"] , 10 )
self.assertGreaterEqual(result["test_rouge2"] , 2 )
self.assertGreaterEqual(result["test_rougeL"] , 7 )
self.assertGreaterEqual(result["test_rougeLsum"] , 7 )
@slow
    def test_run_mlm(self ):
__lowerCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCamelCase : Optional[Any] = F"""\n run_mlm.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --logging_steps 2 --eval_steps 2\n --do_train\n --do_eval\n --num_train_epochs=1\n """.split()
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
run_mlm_flax.main()
__lowerCamelCase : List[str] = get_results(lowerCAmelCase__ )
self.assertLess(result["eval_perplexity"] , 42 )
@slow
    def test_run_t5_mlm(self ):
__lowerCamelCase : Any = self.get_auto_remove_tmp_dir()
__lowerCamelCase : Optional[int] = F"""\n run_t5_mlm_flax.py\n --model_name_or_path t5-small\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --do_train\n --do_eval\n --max_seq_length 128\n --per_device_train_batch_size 4\n --per_device_eval_batch_size 4\n --num_train_epochs 2\n --logging_steps 2 --eval_steps 2\n --output_dir {tmp_dir}\n --overwrite_output_dir\n """.split()
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
run_ta_mlm_flax.main()
__lowerCamelCase : Tuple = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.4_2 )
@slow
    def test_run_ner(self ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
__lowerCamelCase : str = 7 if get_gpu_count() > 1 else 2
__lowerCamelCase : Tuple = self.get_auto_remove_tmp_dir()
__lowerCamelCase : str = F"""\n run_flax_ner.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --do_train\n --do_eval\n --warmup_steps=2\n --learning_rate=2e-4\n --logging_steps 2 --eval_steps 2\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n """.split()
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
run_flax_ner.main()
__lowerCamelCase : Optional[int] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["eval_accuracy"] , 0.7_5 )
self.assertGreaterEqual(result["eval_f1"] , 0.3 )
@slow
    def test_run_qa(self ):
__lowerCamelCase : List[Any] = self.get_auto_remove_tmp_dir()
__lowerCamelCase : List[str] = F"""\n run_qa.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --num_train_epochs=3\n --warmup_steps=2\n --do_train\n --do_eval\n --logging_steps 2 --eval_steps 2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n """.split()
with patch.object(lowerCAmelCase__ , "argv" , lowerCAmelCase__ ):
run_qa.main()
__lowerCamelCase : Optional[int] = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["eval_f1"] , 30 )
        self.assertGreaterEqual(result["eval_exact"] , 30 )
| 646 |
import re
def split_input(str_: str ) -> list:
    return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def to_simple_case(str_: str ) -> str:
    string_split = split_input(str_ )
    return "".join(
        ["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def to_complex_case(text: str , upper: bool , separator: str ) -> str:
    try:
        string_split = split_input(text )
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str] )
                    for sub_str in string_split
                ] )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str] )
                    for sub_str in string_split
                ] )
        return res_str
    except IndexError:
        return "not valid string"
def to_pascal_case(text: str ) -> str:
    return to_simple_case(text )
def to_camel_case(text: str ) -> str:
    try:
        res_str = to_simple_case(text )
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def to_snake_case(text: str , upper: bool ) -> str:
    return to_complex_case(text , upper , "_" )
def to_kebab_case(text: str , upper: bool ) -> str:
    return to_complex_case(text , upper , "-" )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 671 | 0 |
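A few spot checks of the expected behaviour of the repaired case-conversion helpers above:

assert to_pascal_case("hello world") == "HelloWorld"
assert to_camel_case("hello world") == "helloWorld"
assert to_snake_case("hello world", upper=False) == "hello_world"
assert to_kebab_case("hello world", upper=True) == "HELLO-WORLD"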
'''simple docstring'''
import requests
giphy_api_key = "YOUR API KEY"
def get_gifs(query: str , api_key: str = giphy_api_key ) -> list:
    '''simple docstring'''
    formatted_query = "+".join(query.split() )
    url = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("""\n""".join(get_gifs("""space ship""")))
| 672 |
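An equivalent sketch of the request that lets `requests` handle URL encoding through `params=`; the `limit` field is a documented Giphy parameter assumed here for illustration, not something taken from the snippet:

import requests

def get_gifs_with_limit(query, api_key, limit=5):
    # Build the query string via `params=` instead of manual formatting.
    response = requests.get(
        "https://api.giphy.com/v1/gifs/search",
        params={"q": query, "api_key": api_key, "limit": limit},
    )
    return [gif["url"] for gif in response.json()["data"]]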
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str ) -> nn.Module:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 672 | 1 |
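Quick checks of the mapping above, assuming the repaired `get_activation` name; unsupported names raise ValueError:

from torch import nn

assert isinstance(get_activation("silu"), nn.SiLU)
assert isinstance(get_activation("mish"), nn.Mish)
assert isinstance(get_activation("gelu"), nn.GELU)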
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object ):
'''simple docstring'''
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=512 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=0.02 , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase="None" , lowerCamelCase=3 , lowerCamelCase=4 , lowerCamelCase=None , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = relative_attention
_snake_case = position_biased_input
_snake_case = pos_att_type
_snake_case = scope
def UpperCamelCase( self ):
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase( self , lowerCamelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = DebertaVaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase )[0]
_snake_case = model(lowerCamelCase , token_type_ids=lowerCamelCase )[0]
_snake_case = model(lowerCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = DebertaVaForMaskedLM(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = self.num_labels
_snake_case = DebertaVaForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = self.num_labels
_snake_case = DebertaVaForTokenClassification(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = DebertaVaForQuestionAnswering(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_snake_case = DebertaVaForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
_snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = True
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[Any] = False
def UpperCamelCase( self ):
_snake_case = DebertaVaModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = DebertaVaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def UpperCamelCase( self ):
pass
@slow
def UpperCamelCase( self ):
_snake_case = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
_snake_case = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_snake_case = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_snake_case = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
# compare the actual values for a slice.
_snake_case = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase , atol=1e-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 672 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPosition = tuple[int, int]
class Node:
    '''simple docstring'''
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self ):
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx ) + abs(dy )
        else:
            return sqrt(dy**2 + dx**2 )
def __lt__( self , lowerCamelCase ):
return self.f_cost < other.f_cost
class AStar:
    '''simple docstring'''
    def __init__( self , start , goal ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False
    def search(self ):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        return [self.start.pos]
    def get_successors(self , parent ):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path(self , node ):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    '''simple docstring'''
    def __init__( self , start , goal ):
        self.fwd_astar = AStar(start , goal )
        self.bwd_astar = AStar(goal , start )
        self.reached = False
    def search(self ):
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0 )
            current_bwd_node = self.bwd_astar.open_nodes.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node )
            self.bwd_astar.closed_nodes.append(current_bwd_node )
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node ),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node ),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node )
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node ) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node )
                        else:
                            astar.open_nodes.append(better_node )
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self , fwd_node , bwd_node ):
        fwd_path = self.fwd_astar.retrace_path(fwd_node )
        bwd_path = self.bwd_astar.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 672 | 1 |
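The two heuristics toggled by `HEURISTIC` above, isolated for clarity; both are admissible on this 4-connected unit-cost grid, but Manhattan is tighter and usually expands fewer nodes:

from math import sqrt

def manhattan(dx, dy):
    # No 4-connected path can be shorter than |dx| + |dy|.
    return abs(dx) + abs(dy)

def euclidean(dx, dy):
    # Straight-line distance; admissible but looser on grid moves.
    return sqrt(dx * dx + dy * dy)

assert manhattan(3, 4) == 7
assert euclidean(3, 4) == 5.0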
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__magic_name__ : int = logging.get_logger(__name__)
__magic_name__ : List[Any] = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    '''simple docstring'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model , hf_model , is_finetuned ):
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    '''simple docstring'''
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
        feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
        feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
        feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
        feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
def convert_config(model , is_finetuned ):
    '''simple docstring'''
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.wav_encoder.wav_model.cfg
    else:
        fs_config = model.cfg
    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers )
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1E-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers )
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor
    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob
    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"
    return config
@torch.no_grad()
def convert_sew_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
'''simple docstring'''
if is_finetuned:
_snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
_snake_case = SEWConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
_snake_case = convert_config(model[0] , SCREAMING_SNAKE_CASE__ )
_snake_case = model[0].eval()
_snake_case = True if config.feat_extract_norm == "layer" else False
_snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
if is_finetuned:
if dict_path:
_snake_case = Dictionary.load(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_snake_case = target_dict.pad_index
_snake_case = target_dict.bos_index
_snake_case = target_dict.pad_index
_snake_case = target_dict.bos_index
_snake_case = target_dict.eos_index
_snake_case = len(target_dict.symbols )
_snake_case = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab.json" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(target_dict.indices , SCREAMING_SNAKE_CASE__ )
_snake_case = WavaVecaCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
_snake_case = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
_snake_case = SEWForCTC(SCREAMING_SNAKE_CASE__ )
else:
_snake_case = SEWModel(SCREAMING_SNAKE_CASE__ )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__magic_name__ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__magic_name__ : int = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
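# A hedged usage sketch for a converted checkpoint (the folder name
# "./sew-converted" is hypothetical; commented out because it requires a
# completed conversion run):
#
# import torch
# from transformers import AutoProcessor, AutoModelForCTC
#
# processor = AutoProcessor.from_pretrained("./sew-converted")
# model = AutoModelForCTC.from_pretrained("./sew-converted").eval()
# speech = torch.randn(16_000)  # one second of dummy 16 kHz audio
# inputs = processor(speech.numpy(), sampling_rate=16_000, return_tensors="pt")
# with torch.no_grad():
#     ids = model(**inputs).logits.argmax(dim=-1)
# print(processor.batch_decode(ids))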
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ : int = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[Any] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__magic_name__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
def solution( n = 1_00 ):
'''simple docstring'''
sum_cubes = (n * (n + 1) // 2) ** 2  # square of the sum 1 + 2 + ... + n
sum_squares = n * (n + 1) * (2 * n + 1) // 6  # sum of the squares
return sum_cubes - sum_squares
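# Worked example of the closed forms above: for n = 10 the sum 1..10 is 55,
# its square is 3025, the sum of squares is 385, and the difference is 2640.
assert solution(10 ) == 2_640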
if __name__ == "__main__":
print(F'{solution() = }')
| 672 |
'''simple docstring'''
import string
def atbash_slow( sequence ):
'''simple docstring'''
output = ""
for i in sequence:
extract = ord(i )
if 65 <= extract <= 90:
output += chr(1_55 - extract )  # 155 = ord("A") + ord("Z"): uppercase mirror
elif 97 <= extract <= 1_22:
output += chr(2_19 - extract )  # 219 = ord("a") + ord("z"): lowercase mirror
else:
output += i
return output
def atbash( sequence ):
'''simple docstring'''
letters = string.ascii_letters
letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
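# Small sanity check: Atbash is an involution, so encoding twice round-trips.
assert atbash("Hello" ) == "Svool" and atbash(atbash("Hello" ) ) == "Hello"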
def benchmark( ):
'''simple docstring'''
from timeit import timeit
print("Running performance benchmarks..." )
setup = "from string import printable ; from __main__ import atbash, atbash_slow"
print(f'''> atbash_slow(): {timeit("atbash_slow(printable)" , setup=setup )} seconds''' )
print(f'''> atbash(): {timeit("atbash(printable)" , setup=setup )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F'{example} encrypted in atbash: {atbash(example)}')
benchmark()
| 672 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : Any = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__magic_name__ : List[str] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
_snake_case = "lm_head"
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
_snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_snake_case = value
elif weight_type == "weight_g":
_snake_case = value
elif weight_type == "weight_v":
_snake_case = value
elif weight_type == "bias":
_snake_case = value
else:
_snake_case = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = []
_snake_case = fairseq_model.state_dict()
_snake_case = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
_snake_case = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == "group" , )
_snake_case = True
else:
for key, mapped_key in MAPPING.items():
_snake_case = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_snake_case = True
if "*" in mapped_key:
_snake_case = name.split(key )[0].split("." )[-2]
_snake_case = mapped_key.replace("*" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
_snake_case = "weight_g"
elif "weight_v" in name:
_snake_case = "weight_v"
elif "bias" in name:
_snake_case = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_snake_case = "weight"
else:
_snake_case = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
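# Worked example of the "*" substitution above: the fairseq key
# "encoder.layers.3.self_attn.k_proj.weight" matches MAPPING["self_attn.k_proj"],
# the layer index "3" is recovered from the text before the match, and the
# target becomes "unispeech.encoder.layers.3.attention.k_proj" with
# weight_type "weight".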
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = full_name.split("conv_layers." )[-1]
_snake_case = name.split("." )
_snake_case = int(items[0] )
_snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_snake_case = value
logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=True ):
'''simple docstring'''
if config_path is not None:
_snake_case = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
_snake_case = UniSpeechConfig()
if is_finetuned:
if dict_path:
_snake_case = Dictionary.load_from_json(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_snake_case = target_dict.pad_index
_snake_case = target_dict.bos_index
_snake_case = target_dict.eos_index
_snake_case = len(target_dict.symbols )
_snake_case = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab.json" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
_snake_case = target_dict.indices
# fairseq has the <pad> and <s> switched
_snake_case = 42
_snake_case = 43
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
_snake_case = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
_snake_case = config.feat_extract_norm == "layer"
_snake_case = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
_snake_case = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
_snake_case = UniSpeechForCTC(SCREAMING_SNAKE_CASE__ )
else:
_snake_case = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned:
_snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
_snake_case , _snake_case , _snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_snake_case = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__magic_name__ : Tuple = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__magic_name__ : str = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 672 |
'''simple docstring'''
import numpy as np
def sigmoid( vector ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def gaussian_error_linear_unit( vector ):
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
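# Quick numeric sanity check of the approximation above: GELU(0) is exactly 0
# and the curve approaches the identity for large positive inputs.
_demo = np.array([-3.0, 0.0, 3.0])
_out = gaussian_error_linear_unit(_demo )  # roughly [-0.018, 0.0, 2.982]
assert _out[1] == 0.0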
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 1 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : Dict = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
__magic_name__ : Optional[Any] = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
for attribute in key.split("." ):
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
_snake_case = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
_snake_case = value
elif weight_type == "weight_g":
_snake_case = value
elif weight_type == "weight_v":
_snake_case = value
elif weight_type == "bias":
_snake_case = value
else:
_snake_case = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = []
_snake_case = fairseq_model.state_dict()
_snake_case = hf_model.feature_extractor
for name, value in fairseq_dict.items():
_snake_case = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == "group" , )
_snake_case = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_snake_case = True
if "*" in mapped_key:
_snake_case = name.split(key )[0].split("." )[-2]
_snake_case = mapped_key.replace("*" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
_snake_case = "weight_g"
elif "weight_v" in name:
_snake_case = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
_snake_case = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_snake_case = "weight"
else:
_snake_case = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = full_name.split("conv_layers." )[-1]
_snake_case = name.split("." )
_snake_case = int(items[0] )
_snake_case = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
_snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
_snake_case = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
_snake_case = value
logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
_snake_case = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
'''simple docstring'''
_snake_case = torch.load(SCREAMING_SNAKE_CASE__ )
_snake_case = WavLMConfigOrig(checkpoint["cfg"] )
_snake_case = WavLMOrig(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
_snake_case = WavLMConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
_snake_case = WavLMConfig()
_snake_case = WavLMModel(SCREAMING_SNAKE_CASE__ )
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_wavlm.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__magic_name__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__magic_name__ : str = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 672 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because it should only be run when releasing a minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=lowerCamelCase , )
assert hasattr(self , "env" )
def UpperCamelCase( self , lowerCamelCase=1 ):
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="py36" , )
def UpperCamelCase( self , lowerCamelCase ):
TrainingJobAnalytics(lowerCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
def UpperCamelCase( self ):
# create estimator
_snake_case = self.create_estimator()
# run training
estimator.fit()
# result dataframe
_snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_snake_case = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowerCamelCase )
| 672 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = ['''pixel_values''']
def __init__( self , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = True , lowerCamelCase = 1 / 255 , lowerCamelCase = True , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
_snake_case = size if size is not None else {"shortest_edge": 224}
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
_snake_case = crop_size if crop_size is not None else {"height": 224, "width": 224}
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase , param_name="crop_size" )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_center_crop
_snake_case = crop_size
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
_snake_case = do_convert_rgb
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = PILImageResampling.BICUBIC , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase , default_to_square=lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
_snake_case = get_resize_output_image_size(lowerCamelCase , size=size["shortest_edge"] , default_to_square=lowerCamelCase )
return resize(lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
_snake_case = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCamelCase , size=(size["height"], size["width"]) , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return rescale(lowerCamelCase , scale=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , **lowerCamelCase , ):
return normalize(lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase , data_format=lowerCamelCase , **lowerCamelCase )
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = None , lowerCamelCase = ChannelDimension.FIRST , **lowerCamelCase , ):
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(lowerCamelCase , param_name="size" , default_to_square=lowerCamelCase )
_snake_case = resample if resample is not None else self.resample
_snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop
_snake_case = crop_size if crop_size is not None else self.crop_size
_snake_case = get_size_dict(lowerCamelCase , param_name="crop_size" , default_to_square=lowerCamelCase )
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_snake_case = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_snake_case = [convert_to_rgb(lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
_snake_case = [self.resize(image=lowerCamelCase , size=lowerCamelCase , resample=lowerCamelCase ) for image in images]
if do_center_crop:
_snake_case = [self.center_crop(image=lowerCamelCase , size=lowerCamelCase ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=lowerCamelCase , scale=lowerCamelCase ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=lowerCamelCase , mean=lowerCamelCase , std=lowerCamelCase ) for image in images]
_snake_case = [to_channel_dimension_format(lowerCamelCase , lowerCamelCase ) for image in images]
_snake_case = {"pixel_values": images}
return BatchFeature(data=lowerCamelCase , tensor_type=lowerCamelCase )
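# A hedged usage sketch (illustration only; the concrete processor name
# depends on the model that subclasses the class above):
#
# import numpy as np
# image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
# batch = image_processor(images=image, return_tensors="np")
# # per image: RGB convert -> resize (shortest edge 224) -> 224x224 center
# # crop -> rescale by 1/255 -> normalize -> channels-first layout
# batch["pixel_values"].shape  # (1, 3, 224, 224)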
| 672 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : int = DistilBertTokenizer
UpperCAmelCase__ : Union[str, Any] = DistilBertTokenizerFast
UpperCAmelCase__ : List[str] = True
@slow
def UpperCamelCase( self ):
_snake_case = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
_snake_case = tokenizer.encode("sequence builders" , add_special_tokens=lowerCamelCase )
_snake_case = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCamelCase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase )
_snake_case = tokenizer.build_inputs_with_special_tokens(lowerCamelCase , lowerCamelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
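# Concretely, with illustrative ids cls = 101, sep = 102, A = [7, 8], B = [9]:
#   single sequence: [101, 7, 8, 102]
#   sequence pair:   [101, 7, 8, 102, 9, 102]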
| 672 | 1 |
'''simple docstring'''
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : Optional[int] = logging.get_logger(__name__)
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
_snake_case = tf.train.list_variables(SCREAMING_SNAKE_CASE__ )
_snake_case = []
_snake_case = []
_snake_case = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_snake_case = full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
_snake_case = name[1:]
# figure out how many levels deep the name is
_snake_case = 0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(SCREAMING_SNAKE_CASE__ )
# read data
_snake_case = tf.train.load_variable(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
names.append("/".join(SCREAMING_SNAKE_CASE__ ) )
arrays.append(SCREAMING_SNAKE_CASE__ )
logger.info(f'''Read a total of {len(SCREAMING_SNAKE_CASE__ ):,} layers''' )
# Sanity check
if len(set(SCREAMING_SNAKE_CASE__ ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(SCREAMING_SNAKE_CASE__ ) )})''' )
_snake_case = list(set(SCREAMING_SNAKE_CASE__ ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case = full_name.split("/" )
_snake_case = model
_snake_case = []
for i, m_name in enumerate(SCREAMING_SNAKE_CASE__ ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
_snake_case = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "embeddings" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "encoder" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "layer" )
_snake_case = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "pooler" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "token_type_embeddings" )
else:
raise ValueError(f'''Unknown embedding layer with name {full_name}''' )
trace.append("weight" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "attention" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "attention" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "output" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "attention" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "output" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "output" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.extend(["output", "LayerNorm"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "output" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "intermediate" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm -- identical condition to the branch above, so this path is unreachable
trace.append("output" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
_snake_case = getattr(SCREAMING_SNAKE_CASE__ , "weight" )
else:
logger.warning(f'''Ignored {m_name}''' )
# for certain layers reshape is necessary
_snake_case = ".".join(SCREAMING_SNAKE_CASE__ )
if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , SCREAMING_SNAKE_CASE__ ) or re.match(
r"(\S+)\.attention\.output\.dense\.weight" , SCREAMING_SNAKE_CASE__ ):
_snake_case = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_snake_case = array.transpose()
if pointer.shape == array.shape:
_snake_case = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(
f'''Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'''
f''' {array.shape}''' )
logger.info(f'''Successfully set variable {full_name} to PyTorch layer {trace}''' )
return model
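# Why "kernel" arrays are transposed above: TF stores dense kernels as
# (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features). Hypothetical shapes for an intermediate dense:
#
# tf_kernel.shape == (768, 3_072)  ->  torch weight shape (3_072, 768)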
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
logger.info(f'''Loading model based on config from {config_path}...''' )
_snake_case = BertConfig.from_json_file(SCREAMING_SNAKE_CASE__ )
_snake_case = BertModel(SCREAMING_SNAKE_CASE__ )
# Load weights from checkpoint
logger.info(f'''Loading weights from checkpoint {tf_checkpoint_path}...''' )
load_tfa_weights_in_bert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Save pytorch-model
logger.info(f'''Saving PyTorch model to {pytorch_dump_path}...''' )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
__magic_name__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
__magic_name__ : List[str] = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__magic_name__ : Optional[int] = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Optional[int] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__magic_name__ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative( img ):
'''simple docstring'''
pixel_h , pixel_v = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(pixel_h ):
for j in range(pixel_v ):
img[i][j] = [2_55, 2_55, 2_55] - img[i][j]
return img
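# A vectorized equivalent of the loop above for uint8 images; numpy broadcasts
# the scalar subtraction over every pixel and channel (stand-in array shown):
import numpy as np
_demo = np.random.randint(0, 2_56, size=(4, 4, 3), dtype=np.uint8)
assert ((2_55 - (2_55 - _demo)) == _demo).all()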
if __name__ == "__main__":
# read original image
__magic_name__ : Optional[int] = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__magic_name__ : Any = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 672 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
_snake_case = "backbone." if is_semantic else ""
_snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(f'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(f'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(f'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
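# Example of one pair produced above for i = 0 on a non-semantic checkpoint:
# ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")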
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_snake_case = "backbone." if is_semantic else ""
# queries, keys and values
_snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
_snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
_snake_case = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
_snake_case = in_proj_weight[
: config.hidden_size, :
]
_snake_case = q_bias
_snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_snake_case = in_proj_weight[
-config.hidden_size :, :
]
_snake_case = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
_snake_case = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
_snake_case = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
_snake_case = gamma_a
_snake_case = gamma_a
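# The slicing above splits a fused qkv projection of shape (3*H, H) into three
# (H, H) blocks: rows [:H] -> query, [H:2H] -> key, [-H:] -> value.
_H = 4
_fused = torch.arange(3 * _H * _H ).reshape(3 * _H , _H )
assert _fused[:_H].shape == _fused[_H : 2 * _H].shape == _fused[-_H:].shape == (_H, _H)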
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = dct.pop(SCREAMING_SNAKE_CASE__ )
_snake_case = val
def snake_case_ ( ):
'''simple docstring'''
_snake_case = "http://images.cocodataset.org/val2017/000000039769.jpg"
_snake_case = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
'''simple docstring'''
_snake_case = "rvlcdip" not in checkpoint_url
_snake_case = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE__ , use_mask_token=SCREAMING_SNAKE_CASE__ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
_snake_case = 10_24
_snake_case = 40_96
_snake_case = 24
_snake_case = 16
# labels
if "rvlcdip" in checkpoint_url:
_snake_case = 16
_snake_case = "huggingface/label-files"
_snake_case = "rvlcdip-id2label.json"
_snake_case = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="dataset" ) , "r" ) )
_snake_case = {int(k ): v for k, v in idalabel.items()}
_snake_case = idalabel
_snake_case = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
_snake_case = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location="cpu" )["model"]
_snake_case = create_rename_keys(SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_lm_head=SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
_snake_case = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE__ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image
_snake_case = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE__ )
_snake_case = prepare_img()
_snake_case = image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors="pt" )
_snake_case = encoding["pixel_values"]
_snake_case = model(SCREAMING_SNAKE_CASE__ )
_snake_case = outputs.logits
# verify logits
_snake_case = [1, 16] if "rvlcdip" in checkpoint_url else [1, 1_96, 81_92]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE__ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
if has_lm_head:
_snake_case = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
_snake_case = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE__ , )
if __name__ == "__main__":
__magic_name__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
__magic_name__ : Dict = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 672 | 1 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because it should only be run when releasing a minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_ddp.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.7, '''eval_loss''': 0.6},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf_dist.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.p3.16xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.6, '''eval_loss''': 0.7},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase( self ):
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=lowerCamelCase , )
assert hasattr(self , "env" )
def UpperCamelCase( self , lowerCamelCase ):
_snake_case = F'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
# distributed data settings
_snake_case = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCamelCase , instance_count=lowerCamelCase , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase , py_version="py36" , )
def UpperCamelCase( self , lowerCamelCase ):
TrainingJobAnalytics(lowerCamelCase ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(2,)] )
def UpperCamelCase( self , lowerCamelCase ):
# create estimator
_snake_case = self.create_estimator(lowerCamelCase )
# run training
estimator.fit()
# result dataframe
_snake_case = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
_snake_case = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_snake_case = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , lowerCamelCase )
| 672 |
'''simple docstring'''
import math
def is_prime( number ):
'''simple docstring'''
assert isinstance(number , int ) and (
number >= 0
), "'number' must be an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
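# The square-root bound above in action: for 97 only the odd candidates
# 3, 5, 7 and 9 are tried (sqrt(97) ~ 9.85); none divide it, so 97 is prime.
assert is_prime(97 ) and not is_prime(99 )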
def next_prime( value , factor=1 , **kwargs ):
'''simple docstring'''
value = factor * value
first_value_val = value
while not is_prime(value ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **kwargs )
return value
| 672 | 1 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified( ode_func , ya , xa , step_size , x_end ):
'''simple docstring'''
n = int(np.ceil((x_end - xa) / step_size ) )
y = np.zeros((n + 1,) )
y[0] = ya
x = xa
for k in range(n ):
y_predict = y[k] + step_size * ode_func(x , y[k] )  # explicit Euler predictor
y[k + 1] = y[k] + (
(step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
)  # trapezoidal corrector
x += step_size
return y
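# Usage sketch: integrating y' = y from x = 0 with y(0) = 1 up to x = 1 gives
# a last entry close to e = 2.71828... (the predictor-corrector pair above is
# Heun's second-order method).
_ys = euler_modified(lambda x, y: y , 1.0 , 0.0 , 0.01 , 1.0 )
assert abs(_ys[-1] - 2.7183 ) < 1e-3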
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ : Dict = {
"""configuration_pix2struct""": [
"""PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Pix2StructConfig""",
"""Pix2StructTextConfig""",
"""Pix2StructVisionConfig""",
],
"""processing_pix2struct""": ["""Pix2StructProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["image_processing_pix2struct"] = ["""Pix2StructImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_pix2struct"] = [
"""PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Pix2StructPreTrainedModel""",
"""Pix2StructForConditionalGeneration""",
"""Pix2StructVisionModel""",
"""Pix2StructTextModel""",
]
if TYPE_CHECKING:
from .configuration_pix2struct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
Pix2StructConfig,
Pix2StructTextConfig,
Pix2StructVisionConfig,
)
from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pix2struct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
Pix2StructForConditionalGeneration,
Pix2StructPreTrainedModel,
Pix2StructTextModel,
Pix2StructVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
def __init__( self , start , end , val , left=None , right=None ):
self.start = start
self.end = end
self.val = val
self.mid = (start + end) // 2
self.left = left
self.right = right
def __repr__( self ):
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
def __init__( self , collection , function ):
self.collection = collection
self.fn = function
if self.collection:
self.root = self._build_tree(0 , len(collection ) - 1 )
def update( self , i , val ):
self._update_tree(self.root , i , val )
def query_range( self , i , j ):
return self._query_range(self.root , i , j )
def _build_tree( self , start , end ):
if start == end:
return SegmentTreeNode(start , end , self.collection[start] )
mid = (start + end) // 2
left = self._build_tree(start , mid )
right = self._build_tree(mid + 1 , end )
return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )
def _update_tree( self , node , i , val ):
if node.start == i and node.end == i:
node.val = val
return
if i <= node.mid:
self._update_tree(node.left , i , val )
else:
self._update_tree(node.right , i , val )
node.val = self.fn(node.left.val , node.right.val )
def _query_range( self , node , i , j ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , i , j )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
else:
# range in right child tree
return self._query_range(node.right , i , j )
def traverse( self ):
if self.root is not None:
queue = Queue()
queue.put(self.root )
while not queue.empty():
node = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("""*""" * 50)
arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 672 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ : Optional[int] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE ( BaseImageProcessor ):
'''simple docstring'''
model_input_names = ['''pixel_values''']
def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ):
super().__init__(**kwargs )
size = size if size is not None else {"shortest_edge": 256}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size )
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
def resize( self , image , size , resample = PILImageResampling.BICUBIC , data_format = None , **kwargs , ):
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image , size , data_format = None , **kwargs , ):
size = get_size_dict(size )
return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
def rescale( self , image , scale , data_format = None , **kwargs ):
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image , mean , std , data_format = None , **kwargs , ):
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ):
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size , default_to_square=False )
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size )
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {"pixel_values": images}
return BatchFeature(data=data , tensor_type=return_tensors )
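# Minimal usage sketch for the processor above (assumed, not part of the
# original snippet; the class name is this row's obfuscated placeholder):
#   import numpy as np
#   processor = __SCREAMING_SNAKE_CASE()
#   batch = processor.preprocess(np.zeros((64, 64, 3), dtype=np.uint8))
#   batch["pixel_values"][0].shape  # -> (3, 224, 224) after resize + crop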
| 672 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : List[str] = logging.get_logger(__name__)
__magic_name__ : int = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''realm'''
def __init__( self , vocab_size=30_522 , hidden_size=768 , retriever_proj_size=128 , num_hidden_layers=12 , num_attention_heads=12 , num_candidates=8 , intermediate_size=3_072 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , span_hidden_size=256 , max_span_width=10 , reader_layer_norm_eps=1e-3 , reader_beam_size=5 , reader_seq_len=320 , num_block_records=13_353_718 , searcher_beam_size=5_000 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
# Common config
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.retriever_proj_size = retriever_proj_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_candidates = num_candidates
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.type_vocab_size = type_vocab_size
self.layer_norm_eps = layer_norm_eps
# Reader config
self.span_hidden_size = span_hidden_size
self.max_span_width = max_span_width
self.reader_layer_norm_eps = reader_layer_norm_eps
self.reader_beam_size = reader_beam_size
self.reader_seq_len = reader_seq_len
# Retrieval config
self.num_block_records = num_block_records
self.searcher_beam_size = searcher_beam_size
| 672 |
'''simple docstring'''
import base64
def base85_encode( string ):
"""Encode ``string`` as Ascii85 (base85) bytes."""
return base64.a85encode(string.encode("utf-8" ) )
def base85_decode( a85encoded ):
"""Decode an Ascii85 (base85) string back to text."""
return base64.a85decode(a85encoded ).decode("utf-8" )
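# Example round trip (assumed usage, not part of the original snippet):
#   encoded = base85_encode("some text")        # -> Ascii85 bytes
#   base85_decode(encoded.decode("utf-8"))      # -> "some text"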
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__magic_name__ : Optional[int] = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class DetaConfig( PretrainedConfig ):
'''simple docstring'''
model_type = '''deta'''
attribute_map = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCamelCase=None , lowerCamelCase=900 , lowerCamelCase=2_048 , lowerCamelCase=6 , lowerCamelCase=2_048 , lowerCamelCase=8 , lowerCamelCase=6 , lowerCamelCase=1_024 , lowerCamelCase=8 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=256 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1.0 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase="sine" , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase=True , lowerCamelCase=300 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=1 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=0.1 , lowerCamelCase=0.25 , **lowerCamelCase , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_snake_case = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(lowerCamelCase , lowerCamelCase ):
_snake_case = backbone_config.pop("model_type" )
_snake_case = CONFIG_MAPPING[backbone_model_type]
_snake_case = config_class.from_dict(lowerCamelCase )
_snake_case = backbone_config
_snake_case = num_queries
_snake_case = max_position_embeddings
_snake_case = d_model
_snake_case = encoder_ffn_dim
_snake_case = encoder_layers
_snake_case = encoder_attention_heads
_snake_case = decoder_ffn_dim
_snake_case = decoder_layers
_snake_case = decoder_attention_heads
_snake_case = dropout
_snake_case = attention_dropout
_snake_case = activation_dropout
_snake_case = activation_function
_snake_case = init_std
_snake_case = init_xavier_std
_snake_case = encoder_layerdrop
_snake_case = auxiliary_loss
_snake_case = position_embedding_type
# deformable attributes
_snake_case = num_feature_levels
_snake_case = encoder_n_points
_snake_case = decoder_n_points
_snake_case = two_stage
_snake_case = two_stage_num_proposals
_snake_case = with_box_refine
_snake_case = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_snake_case = class_cost
_snake_case = bbox_cost
_snake_case = giou_cost
# Loss coefficients
_snake_case = mask_loss_coefficient
_snake_case = dice_loss_coefficient
_snake_case = bbox_loss_coefficient
_snake_case = giou_loss_coefficient
_snake_case = eos_coefficient
_snake_case = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def num_attention_heads( self ):
return self.encoder_attention_heads
@property
def hidden_size( self ):
return self.d_model
def to_dict( self ):
output = copy.deepcopy(self.__dict__ )
output["backbone_config"] = self.backbone_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
| 672 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 672 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset( datasets.BeamBasedBuilder ):
def _info( self ):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=None , )
def _split_generators( self , dl_manager , pipeline ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def _build_pcollection( self , pipeline , examples ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(examples )
class NestedBeamDataset( datasets.BeamBasedBuilder ):
def _info( self ):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=None , )
def _split_generators( self , dl_manager , pipeline ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def _build_pcollection( self , pipeline , examples ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(examples )
def get_test_dummy_examples():
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def get_test_nested_examples():
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class BeamBuilderTest( TestCase ):
'''simple docstring'''
@require_beam
def test_download_and_prepare( self ):
expected_num_examples = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , expected_num_examples )
self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def test_download_and_prepare_sharded( self ):
import apache_beam as beam
original_write_parquet = beam.io.parquetio.WriteToParquet
expected_num_examples = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , expected_num_examples )
self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def test_no_beam_options( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def test_nested_features( self ):
expected_num_examples = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , F'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
dset = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , expected_num_examples )
self.assertEqual(dset["train"].info.splits["train"].num_examples , expected_num_examples )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(tmp_cache_dir , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 672 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
__magic_name__ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode( ):
"""Return a mapping from utf-8 byte values to printable unicode strings."""
bs = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
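# The mapping covers all 256 byte values; printable bytes map to themselves,
# e.g. bytes_to_unicode()[ord("!")] == "!" (illustrative note, not part of the
# original snippet).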
def get_pairs( word ):
"""Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols)."""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
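# For instance (illustrative, not part of the original snippet):
#   get_pairs("hello")  # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}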
class LEDTokenizer( PreTrainedTokenizer ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase="replace" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=False , **lowerCamelCase , ):
_snake_case = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else bos_token
_snake_case = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else eos_token
_snake_case = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else sep_token
_snake_case = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else cls_token
_snake_case = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else unk_token
_snake_case = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_snake_case = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token
super().__init__(
errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
with open(lowerCamelCase , encoding="utf-8" ) as vocab_handle:
_snake_case = json.load(lowerCamelCase )
_snake_case = {v: k for k, v in self.encoder.items()}
_snake_case = errors # how to handle errors in decoding
_snake_case = bytes_to_unicode()
_snake_case = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase , encoding="utf-8" ) as merges_handle:
_snake_case = merges_handle.read().split("\n" )[1:-1]
_snake_case = [tuple(merge.split() ) for merge in bpe_merges]
_snake_case = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
_snake_case = {}
_snake_case = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_snake_case = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def vocab_size( self ):
return len(self.encoder )
def get_vocab( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def bpe( self , token ):
if token in self.cache:
return self.cache[token]
word = tuple(token )
pairs = get_pairs(word )
if not pairs:
return token
while True:
bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
first , second = bigram
new_word = []
i = 0
while i < len(word ):
try:
j = word.index(first , i )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
i = j
if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
new_word = tuple(new_word )
word = new_word
if len(word ) == 1:
break
else:
pairs = get_pairs(word )
word = " ".join(word )
self.cache[token] = word
return word
def _tokenize( self , text ):
bpe_tokens = []
for token in re.findall(self.pat , text ):
token = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
return bpe_tokens
def _convert_token_to_id( self , token ):
return self.encoder.get(token , self.encoder.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
return self.decoder.get(index )
def convert_tokens_to_string( self , tokens ):
text = "".join(tokens )
text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def save_vocabulary( self , save_directory , filename_prefix = None ):
if not os.path.isdir(save_directory ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
merge_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(vocab_file , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
index = 0
with open(merge_file , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
index = token_index
writer.write(" ".join(bpe_tokens ) + "\n" )
index += 1
return vocab_file, merge_file
def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ):
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ):
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def prepare_for_tokenization( self , text , is_split_into_words=False , **kwargs ):
add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
text = " " + text
return (text, kwargs)
def _pad( self , encoded_inputs , max_length = None , padding_strategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of = None , return_attention_mask = None , ):
encoded_inputs = super()._pad(
encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
# Load from model defaults
if return_attention_mask is None:
return_attention_mask = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
required_input = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
if needs_to_be_padded:
difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
encoded_inputs["global_attention_mask"] = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 672 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
__magic_name__ : Optional[int] = False
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def test_inference_image_variations( self ):
pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
image = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
generator = torch.manual_seed(0 )
image = pipe(
image=image , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
image_slice = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 672 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_graphormer"] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset( dataset , expected_features ):
assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case = [text_path]
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=("train",) ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
_snake_case = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = TextDatasetReader({"train": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
_snake_case = {"text": "string"}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = TextDatasetReader({"train": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if split:
_snake_case = {split: text_path}
else:
_snake_case = "train"
_snake_case = {"train": text_path, "test": text_path}
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 672 | 1 |
'''simple docstring'''
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class Node:
def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent ):
self.pos_x = pos_x
self.pos_y = pos_y
self.pos = (pos_y, pos_x)
self.goal_x = goal_x
self.goal_y = goal_y
self.g_cost = g_cost
self.parent = parent
self.f_cost = self.calculate_heuristic()
def calculate_heuristic( self ):
dx = abs(self.pos_x - self.goal_x )
dy = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , other ):
return self.f_cost < other.f_cost
class GreedyBestFirst:
def __init__( self , start , goal ):
self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , None )
self.open_nodes = [self.start]
self.closed_nodes = []
self.reached = False
def search( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
current_node = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
self.reached = True
return self.retrace_path(current_node )
self.closed_nodes.append(current_node )
successors = self.get_successors(current_node )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(child_node )
else:
# retrieve the best current path
better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(child_node )
else:
self.open_nodes.append(better_node )
if not self.reached:
return [self.start.pos]
return None
def get_successors( self , parent ):
successors = []
for action in delta:
pos_x = parent.pos_x + action[1]
pos_y = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
return successors
def retrace_path( self , node ):
current_node = node
path = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
current_node = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
init = (0, 0)
goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
greedy_bf = GreedyBestFirst(init, goal)
path = greedy_bf.search()
if path:
for pos_x, pos_y in path:
grid[pos_x][pos_y] = 2
for elem in grid:
print(elem)
| 672 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swinv2"] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 | 1 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__magic_name__ : int = TypeVar("""KT""")
__magic_name__ : Tuple = TypeVar("""VT""")
class Node(Generic[KT, VT] ):
def __init__( self , key = "root" , value = None ):
self.key = key
self.value = value
self.forward = []
def __repr__( self ):
return F'''Node({self.key}: {self.value})'''
@property
def level( self ):
return len(self.forward )
class SkipList(Generic[KT, VT] ):
def __init__( self , p = 0.5 , max_level = 16 ):
self.head = Node[KT, VT]()
self.level = 0
self.p = p
self.max_level = max_level
def __str__( self ):
items = list(self )
if len(items ) == 0:
return F'''SkipList(level={self.level})'''
label_size = max((len(str(item ) ) for item in items) , default=4 )
label_size = max(label_size , 4 ) + 4
node = self.head
lines = []
forwards = node.forward.copy()
lines.append(F'''[{node.key}]'''.ljust(label_size , "-" ) + "* " * len(forwards ) )
lines.append(" " * label_size + "| " * len(forwards ) )
while len(node.forward ) != 0:
node = node.forward[0]
lines.append(
F'''[{node.key}]'''.ljust(label_size , "-" )
+ " ".join(str(n.key ) if n.key == node.key else "|" for n in forwards ) )
lines.append(" " * label_size + "| " * len(forwards ) )
forwards = node.forward
lines.append("None".ljust(label_size ) + "* " * len(forwards ) )
return F'''SkipList(level={self.level})\n''' + "\n".join(lines )
def __iter__( self ):
node = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
node = node.forward[0]
def random_level( self ):
level = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _locate_node( self , key ):
update_vector = []
node = self.head
for i in reversed(range(self.level ) ):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(node )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def delete( self , key ):
node , update_vector = self._locate_node(key )
if node is not None:
for i, update_node in enumerate(update_vector ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
update_node.forward[i] = node.forward[i]
else:
update_node.forward = update_node.forward[:i]
def insert( self , key , value ):
node , update_vector = self._locate_node(key )
if node is not None:
node.value = value
else:
level = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , level ):
update_vector.append(self.head )
self.level = level
new_node = Node(key , value )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(new_node )
else:
update_node.forward[i] = new_node
def find( self , key ):
node , _ = self._locate_node(key )
if node is not None:
return node.value
return None
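# Basic usage of the class above (illustrative, not part of the original
# snippet):
#   sl = SkipList()
#   sl.insert("a", 1)
#   sl.find("a")    # -> 1
#   sl.delete("a")
#   sl.find("a")    # -> None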
def snake_case_ ( ):
'''simple docstring'''
skip_list = SkipList()
skip_list.insert("Key1" , 3 )
skip_list.insert("Key2" , 12 )
skip_list.insert("Key3" , 41 )
skip_list.insert("Key4" , -19 )
node = skip_list.head
all_values = {}
while node.level != 0:
node = node.forward[0]
all_values[node.key] = node.value
assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def snake_case_ ( ):
'''simple docstring'''
skip_list = SkipList()
skip_list.insert("Key1" , 10 )
skip_list.insert("Key1" , 12 )
skip_list.insert("Key5" , 7 )
skip_list.insert("Key7" , 10 )
skip_list.insert("Key10" , 5 )
skip_list.insert("Key7" , 7 )
skip_list.insert("Key5" , 5 )
skip_list.insert("Key10" , 10 )
node = skip_list.head
all_values = {}
while node.level != 0:
node = node.forward[0]
all_values[node.key] = node.value
if len(all_values ) != 4:
print()
assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def snake_case_ ( ):
'''simple docstring'''
skip_list = SkipList()
assert skip_list.find("Some key" ) is None
def snake_case_ ( ):
'''simple docstring'''
skip_list = SkipList()
skip_list.insert("Key2" , 20 )
assert skip_list.find("Key2" ) == 20
skip_list.insert("Some Key" , 10 )
skip_list.insert("Key2" , 8 )
skip_list.insert("V" , 13 )
assert skip_list.find("Y" ) is None
assert skip_list.find("Key2" ) == 8
assert skip_list.find("Some Key" ) == 10
assert skip_list.find("V" ) == 13
def snake_case_ ( ):
'''simple docstring'''
skip_list = SkipList()
skip_list.delete("Some key" )
assert len(skip_list.head.forward ) == 0
def snake_case_ ( ):
'''simple docstring'''
skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("Key2" ) is None
def snake_case_ ( ):
'''simple docstring'''
skip_list = SkipList()
skip_list.insert("Key1" , 12 )
skip_list.insert("V" , 13 )
skip_list.insert("X" , 14 )
skip_list.insert("Key2" , 15 )
skip_list.delete("V" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) == 14
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("X" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) == 12
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key1" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) == 15
skip_list.delete("Key2" )
assert skip_list.find("V" ) is None
assert skip_list.find("X" ) is None
assert skip_list.find("Key1" ) is None
assert skip_list.find("Key2" ) is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    # Head plus the three remaining keys; a dangling node would add extras.
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))

    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))

    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests():
    for _ in range(100):
        # Repeat each test 100 times due to the probabilistic nature of the
        # skip list: random levels == random bugs.
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_found_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")  # overrides the value stored under key 4
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
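    # Added usage sketch (not part of the original tests): keys may be
    # inserted in any order, iteration follows the bottom lane, and find()
    # returns the stored value.
    demo = SkipList()
    for key, value in [(3, "three"), (1, "one"), (2, "two")]:
        demo.insert(key, value)
    print(list(demo))    # bottom-lane traversal yields the keys in sorted order
    print(demo.find(2))  # -> "two"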
| 672 |
"""
Approximate the square root of a number with the Newton-Raphson method.
"""
import math


def fx(x: float, a: float) -> float:
    # f(x) = x^2 - a; its positive root is the square root of ``a``.
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    # f'(x) = 2x
    return 2 * x


def get_initial_point(a: float) -> float:
    # Repeated squaring yields a starting point no smaller than sqrt(a).
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 1e-14
) -> float:
    """
    Square root computed by Newton's iteration x_{n+1} = x_n - f(x_n) / f'(x_n).
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
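    # Added usage sketch: Newton's iteration converges quadratically, so a
    # handful of steps reaches the 1e-14 tolerance.
    print(square_root_iterative(2))   # ~1.4142135623730951 (math.sqrt(2))
    print(square_root_iterative(49))  # ~7.0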
| 672 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
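# Added usage sketch (an assumption, not part of this __init__; it follows the
# usual diffusers pattern with torch and transformers installed, and the
# "openai/shap-e" checkpoint name is illustrative):
#
#     from diffusers import ShapEPipeline
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#     images = pipe("a donut", guidance_scale=15.0, frame_size=64).images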
| 672 |
"""GIT model configuration"""

import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class GitConfig(PretrainedConfig):
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
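# Added usage sketch (an assumption; it mirrors the standard transformers
# config pattern rather than anything specific to this file):
#
#     config = GitConfig(num_hidden_layers=2)  # small config, e.g. for tests
#     vision = config.vision_config            # a GitVisionConfig instance
#     serialized = config.to_dict()            # nested vision dict + model_type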
| 672 | 1 |